blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 5-283 chars) | content_id (string, 40 chars) | detected_licenses (sequence, 0-41 items) | license_type (2 classes) | repo_name (string, 7-96 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (43 classes) | src_encoding (9 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (30 classes) | content (string, 7-5.88M chars) | authors (sequence, 1 item) | author (string, 0-73 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
52f918a11c4f8b68cbd12ff571b0f79cab91be4e | 1f2697ad292791a9f550422b5c7123382c79dc54 | /16sender.py | 3a0bf0426b4fbd111348bf0ff723610397eb4a5f | [] | no_license | wukainf/PyQt5- | 1ec2ced0a7775f0bb3aa5e17d462fb1aeff43fce | 31452fc9669b87f74f0b09ad57e45facdcee807e | refs/heads/master | 2021-01-11T20:18:44.495971 | 2017-01-20T10:03:03 | 2017-01-20T10:03:03 | 79,086,988 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QPushButton, QVBoxLayout, QHBoxLayout
class Myform(QMainWindow):
def __init__(self):
super(Myform, self).__init__()
self.initUI()
def initUI(self):
btn1 = QPushButton('Button1', self)
btn2 = QPushButton('Button2', self)
        # Attach click handlers to both buttons
btn1.clicked.connect(self.buttonClicked)
btn2.clicked.connect(self.buttonClicked)
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(btn1)
hbox.addWidget(btn2)
vbox = QVBoxLayout()
vbox.addStretch(1)
vbox.addLayout(hbox)
widget = QWidget()
widget.setLayout(vbox)
        self.setCentralWidget(widget) # QMainWindow already has its own layout, so setLayout cannot be called on it directly
self.resize(300, 200)
self.move(300, 300)
self.setWindowTitle('sender')
self.show()
self.statusBar()
def buttonClicked(self):
        # Event handler: show which button emitted the signal
sender = self.sender()
self.statusBar().showMessage(sender.text() + ' was pressed')
if __name__ == '__main__':
app = QApplication(sys.argv)
w = Myform()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
2830b96e1dcb39f5d16d310bcf1aa83d9bbd7a2a | 4c23f3543384747dd3b30487f97dd0f32f0a111f | /apps/tasks/__init__.py | af192f7e82da3f6076f1517aa3c85fbff4de1a41 | [] | no_license | dmallinger/recaster | b57d9ec2ecd7182cc80e03eb63b26f64a7dc5a63 | 51ece9ab492b077f00809ac2680e4bbb8e0c355c | refs/heads/master | 2023-05-12T13:05:11.841044 | 2019-12-04T21:08:48 | 2019-12-04T21:08:48 | 193,429,724 | 0 | 0 | null | 2023-05-01T20:35:08 | 2019-06-24T03:47:52 | Python | UTF-8 | Python | false | false | 107 | py |
from .utils import require_cron_job, require_task_api_key
from .utils import add_task, get_task_arguments
| [
"[email protected]"
] | |
692e67ef96d970b4cb5b65e005a9ffccd3fd0742 | 1e300a3ec484086b870e560530f901e7515e12cc | /higgs.py | b1169a2bd39b77ba306e66ffb4af9c49df0ab6d0 | [] | no_license | samhiggs/ID3-decision-tree-machine-learning-algorithm | 0b75cf88d45f2ed1299008ba5879d43f0cc66c1e | 3b682d2ea16cd1f4f138f56d52978f191c96913b | refs/heads/master | 2022-01-09T16:50:12.258440 | 2019-05-06T09:02:30 | 2019-05-06T09:02:30 | 184,758,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,563 | py | import numpy as np
import pandas as pd
import unittest, random, math, os, sys
from matplotlib import pyplot as plt
class Node:
'''
The Node data structure is used to store the feature and
decision for each feature and decision of the binary tree
'''
def __init__(self, f, d):
self.feature = f
self.decision = d
self.children = []
def __str__(self):
return ''' Node
{:>7} : {:<4}
{:>7} : {:<4}
{:>7} : {:<4}
'''.format(\
'Feature', self.feature if self.feature is not None else 'NA',\
'Decision', self.decision if self.decision is not None else 'NA', \
'N Children', len(self.children) if self.children else 'False', \
)
def __eq__(self, other):
return(
self.__class__==other.__class__ and \
self.feature == other.feature and \
self.decision == other.decision and \
self.children == other.children \
)
data_surround = '\n{:{fill}{align}{width}}\n'
def ID3(d, n, data, header=None):
'''
ID3 builds a decision tree recursively. Assumes the data has no features
    in the header row. Feature names may instead be passed via the optional header argument.
Parameters
d (int):
The max depth of the tree
n (int):
The maximum number of nodes
data (list):
n-dimensional dataset
Returns
root (Node):
The root of the tree of depth d.
'''
#Check to ensure the inputs are valid.
if d is None or not isinstance(d, int) or d < 1: raise Exception('d is not valid')
if n is None or not isinstance(n, int) or n < d : raise Exception('n is not valid')
if data is None: raise Exception('data is not valid')
#convert the dataset to a numpy array so we can use some of the app. methods..
try:
if not isinstance(data, np.ndarray):
data = np.asarray(data)
rows, cols = data.shape
if rows < 2 or cols < 2:
raise Exception('The dataset will not be useful as there are to few rows and/or columns')
except Exception as e:
print(e)
raise Exception('The data cannot be converted in into a numpy array ')
if header is None:
features = ['c_'+str(i) for i in range(cols-1)]
features.append('labels')
else:
features = header
#Setup tree
root = Node('root', 'root')
print(data_surround.format('Building Tree', fill='*', align='^', width=50))
buildTree(data, root, features)
return root
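# Illustrative usage sketch: ID3 expects an (n_rows x n_cols) array whose last
# column holds the labels, as in the unit tests and the __main__ block below:
#   root = ID3(3, 3, play_tennis_data)
#   print_tree(root)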
def buildTree(subset, node, features):
'''
buildTree is a helper function for the ID3 and will recursively build a tree.
The base cases are whether all the indices are the same and therefore cannot be
split further. In this case, it will return. The tree is built of the initial node,
therefore no return value is necessary.
    WARNING: The dataset in the nodes is NOT changed; only the feature set is manipulated.
    This avoids the cost of copying the dataset on every split: each node just
    points at the one dataset, so the feature list must be used as the source of truth.
Parameters
node (Node) :
the node for which children will be spawned
features (list) :
The indexed list of features which reduces each time a feature is split.
subset (ndarray):
an n-dim array representing a subset of the data which hasn't been analysed.
Return
No return value
'''
print(features,subset.shape)
if not isinstance(subset, np.ndarray): raise Exception('Must be a numpy array')
if not features or len(features) < 1: raise Exception('No features left.')
if node is None or node.children is None: raise Exception('No node or improperly created.')
if subset is None or subset.shape[0] < 1 or subset.shape[1] < 1 or len(features) > subset.shape[1]:
raise Exception('subset is not being read in correctly.')
labels = np.unique(subset[:,-1])
# Base case for if all labels are the same.
if len(labels) == 1:
leaf = Node(subset[0,-1], labels[0])
node.children.append(leaf)
return
# Base case for if we are at the end of the dataset
if len(features) == 1:
for cat in np.unique(subset[:,-1]):
leaf = Node(features[0], cat)
node.children.append(leaf)
return
# Make absolutely sure that we don't keep going
if len(features) == 0: raise Exception('Oops we should not have hit this...Check code!')
#Recursive Function given the best feature of the set (target feature)
max_idx = np.argmax([compute_gain(subset, f)[0] for f, _ in enumerate(features[:-1])])
feature = features.pop(max_idx)
for c in np.unique(subset[:, max_idx]):
#create a child node
child = Node(feature, c)
node.children.append(child)
#split the data
child_data = subset[subset[:,max_idx]==c]
child_data = np.concatenate((child_data[:,:max_idx], child_data[:,max_idx+1:]), axis=1)
buildTree(child_data, child, features)
return
#Helper for debugging.
def visualiseData(data):
rows, cols = data.shape
col_data = {}
for i in range(cols):
cat, counts = np.unique(data[:,i], return_counts=True)
decisions = [data[:,-1][data[:,i]==c] for c in cat]
decisions = [', '.join(d) for d in decisions]
col_data[i]={}
col_data[i].update({cat:{'count': c, 'decisions':d} for cat, c, d in zip(cat, counts, decisions)})
# col_data[header[i]].update({'Total':sum(counts)})
print(data_surround.format('Data Summary', fill='*', align='^', width=50))
print('n rows : {}, n cols : {}'.format(rows, cols))
print('Column categories and count:')
for k, v in col_data.items():
print('{:>10}: '.format(k))
for col_cats,val in v.items():
print('{:>16}: {:>2} : {:>2}'.format(col_cats, val['count'], val['decisions']))
print(data_surround.format('End Summary', fill='*', align='^', width=50))
print(data_surround.format('Visualise The Data', fill='*', align='^', width=50))
def print_tree(root):
nodes = [[root, root]]
width, next_width = 1, 0
depth = 0
while len(nodes) > 0:
n, parent = nodes.pop(0)
if width == 0:
depth += 1
width = next_width
next_width = 0
width -= 1
if len(n.children) > 0:
next_width+= len(n.children)
nodes.extend([[child, n] for child in n.children])
p = depth*2
print(f"{'':^{p}} Parent {parent.feature} : {parent.decision}")
print(f"{'':^{p}}{n}")
return
#Complete and working
def compute_gain(S, i):
'''
Gain computation by splitting the set across the ith index using the entropy calculations
Parameters:
S (n-dim array): The dataset that you wish to calculate the information gain on, must
be at least 2 dimensions with the labels on the final column.
i (int) : The index of the column.
Return:
gain (float) : The difference between the previous and new entropy
'''
if not isinstance(S, np.ndarray): S = np.asarray(S)
rows, cols = S.shape
if cols < 2: return -1
if i-1 > cols: return -1
subset = S[:,[i,-1]]
rows, cols = subset.shape
total_entropy = entropy(subset[:,-1])
categories = np.unique(subset[:,0])
divided_S = [subset[subset[:,0]==c] for c in categories]
entropies = [entropy(div_s[:,-1]) for div_s in divided_S]
props = [len(div_s)/rows for div_s in divided_S] #count/rows for each category for each column
combined = sum([x*y for x,y in zip(props, entropies)])
return (total_entropy - combined), categories
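# Worked example for illustration: for the small test_data set defined further
# below, splitting on column 0 separates the labels far better than column 1:
#   compute_gain(test_data, 0)[0]  ->  0.556780
#   compute_gain(test_data, 1)[0]  ->  0.281291
# (the same values asserted by the unit tests); the second element of the
# returned tuple is the array of category values found in that column.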
#Complete and working
def entropy(S):
'''
Calculate the entropy of a dataset across label l
Parameters:
        S (1-dim array): The dataset that you wish to calculate the entropy on, must be at least 1 dimension
Returns:
entropy (float): The entropy of the column rounded to 6d.p
'''
if not isinstance(S, np.ndarray): S = np.asarray(S)
rows = S.shape
    if len(rows) != 1: return -1
categories, counts = np.unique(S, return_counts=True)
cat_cnt = dict(zip(categories, counts))
entropy = -sum((cat_cnt[cat]/rows)*math.log((cat_cnt[cat]/rows), 2) for cat in categories)[0]
return round(entropy, 6)
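# Worked example for illustration: a perfectly mixed binary column has maximal
# entropy, while a 3:1 split is less uncertain:
#   entropy(np.array(['a', 'b', 'a', 'b']))  ->  1.0
#   entropy(np.array(['a', 'a', 'a', 'b']))  ->  0.811278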
#Randomly divide the data by the percentage split.
def split_data(data, split):
'''
split_data generates a random set of indices which then divide the training and test set
Parameters:
data (ndarray):
n-dim array that is being split
split (float):
The percentage split for example, .7 is 70% split
Return:
test, train (ndarray, ndarray):
two n-dim arrays with the appropriate split.
'''
largerSplit = split if split > .5 else 1 - split
training_set_is = random.sample(range(len(data)),int(len(data)*largerSplit))
test_set_is = [i for i in range(len(data)) if i not in training_set_is]
training_set = data[training_set_is, : ]
test_set = data[test_set_is, : ]
return training_set, test_set
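# Illustrative usage sketch (hypothetical data): split_data expects a NumPy
# array, because it uses fancy indexing, and always gives the larger share to
# the training set. With a 10-row array and split=0.3:
#   train, test = split_data(np.arange(20).reshape(10, 2), 0.3)
#   train.shape, test.shape  ->  (7, 2), (3, 2)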
def learning_curve(d, n, training_set, test_set):
'''
    Sadly I ran out of time to implement this. The function had been started, but I ran into mistakes, so I
    removed it to ensure that the script could run.
    The implementation was going to limit the depth and breadth of the decision tree to d and n, choosing the best
    gain greedily. It would fit the model using the training set, then predict on the test_set and append a column of predicted labels.
    It would then calculate the difference between the labels it predicted and the actual labels, and use this to measure
    the errors.
'''
plot = ''
# you will probably need additional helper functions
return plot
test_features = ['color', 'softness', 'tasty',] #where tasty is the label
test_data = [
[0,0,0],
[0,0,0],
[0,0,0],
[1,0,0],
[0,1,0],
[0,1,0],
[0,1,0],
[1,1,1],
[1,1,1],
[1,1,1],
]
common_e = {
"one_half" : 1.0,
"one_third" : round(0.9182958340544896, 6),
'one_quarter' : round(0.8112781244591328, 6),
"two_fifths": round(0.9709505944546685, 6),
"one_fifth" : round(0.7219280948873623, 6),
"one_tenth" : round(0.4689955935892812,6)
}
class TestID3Functions(unittest.TestCase):
def test_entropy(self):
simpleData = [['a','orange'],['b','apple']]
data = np.array(simpleData)[:,0]
e = entropy(data)
print('entropy for simpleData is {}'.format(e))
self.assertEqual(e, 1)
def test_another_entropy(self):
simpleData = [['a','orange'], ['b', 'apple'], ['b', 'apple'],['b','apple']]
dataLeft = np.array(simpleData)[:,0]
dataRight = np.array(simpleData)[:,1]
el = entropy(dataLeft)
er = entropy(dataRight)
print('Entropy for simple data L is {}'.format(el))
print('Entropy for simple data R is {}'.format(er))
self.assertEqual(el, common_e['one_quarter'])
self.assertEqual(er, common_e['one_quarter'])
def test_numeric_entropy(self):
dataColOne = np.array(test_data)[:,0]
dataColTwo = np.array(test_data)[:,1]
dataColThree = np.array(test_data)[:,2]
entropies = [entropy(dataColOne), entropy(dataColTwo), entropy(dataColThree)]
print('Entropy for test data column 0 is {}'.format(entropies[0]))
print('Entropy for test data column 1 is {}'.format(entropies[1]))
print('Entropy for test data column 2(labels) is {}'.format(entropies[2]))
expected_o = [common_e['two_fifths'], common_e['two_fifths'], round(-((7/10)*math.log(7/10, 2) + (3/10)*math.log(3/10, 2)), 6)]
self.assertListEqual(entropies, expected_o)
def test_compute_gain(self):
uncertain_data = np.array([
[0,'a'],
[0, 'a'],
[0, 'b'],
[1, 'a'],
[1, 'b']
])
uncertain_to_certain = np.array([
[0,'a'],
[0,'a'],
[0,'a'],
[1,'b'],
[1,'b'],
[1,'b'],
])
        uncertain_gain = compute_gain(uncertain_data, 0)[0]
        gain_col0 = compute_gain(test_data,0)[0]
        gain_col1 = compute_gain(test_data,1)[0]
        self.assertEqual(round(uncertain_gain,6), 0.019973)
        self.assertEqual(round(compute_gain(uncertain_to_certain,0)[0],6), 1)
        self.assertEqual(round(gain_col0,6), .556780)
        self.assertEqual(round(gain_col1, 6), .281291)
def test_split(self):
print('running test split.')
        data = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]).reshape(20, 1)
split = .3
train_set, test_set = split_data(data, split)
# print(train_set)
# print(test_set)
self.assertEqual(len(train_set), len(data)*.7)
self.assertEqual(len(test_set), len(data)*.3)
def test_ID3_temp_data(self):
play_tennis_data = np.array([
['Sunny', 'Hot', 'High', 'Weak', 'No'],
['Sunny', 'Mild', 'High', 'Weak', 'No'],
['Sunny', 'Mild', 'Normal', 'Strong', 'Yes'],
['Sunny', 'Cold', 'Normal', 'Weak', 'Yes'],
['Sunny', 'Hot', 'High', 'Strong', 'No'],
['Overcast', 'Hot', 'High', 'Weak', 'Yes'],
['Overcast', 'Cool', 'Normal', 'Strong', 'Yes'],
['Overcast', 'Mild', 'High', 'Strong', 'Yes'],
['Overcast', 'Hot', 'Normal', 'Weak', 'Yes'],
['Rain', 'Mild', 'High', 'Weak', 'Yes'],
['Rain', 'Cool', 'Normal', 'Strong', 'No'],
['Rain', 'Mild', 'Normal', 'Weak', 'Yes'],
['Rain', 'Mild', 'High', 'Strong', 'No']]
)
features = ['Outlook', 'Temperature', 'Humidity', \
'Wind', 'Decision'],
#Calculations made using wolframalpha
root_gain = round(0.96123660472287587, 6)
expected_gain = {
'Decision': root_gain,
'Outlook' : round(root_gain - ((4/13)*0.0 + (4/13)*1.0 + (5/13)*common_e['two_fifths']),6),
'Temperature': round(root_gain - ((1/13)*0.0 + (2/13)*1.0 + (4/13)*1.0 + (6/13)*common_e['one_third']), 6),
'Humidity' : round(root_gain - ((7/13)*round(0.98522813603425, 6) + (6/13)*round(0.65002242164835,6))),
'Wind' : round(root_gain - ((6/13)*1.0 + (7/13)*common_e['one_third']), 6)
}
root = ID3(3,3,play_tennis_data)
self.print_tree(root)
self.assertEqual(1, 1)
def print_tree(self, root):
nodes = [[root, root]]
width, next_width = 1, 0
depth = 0
while len(nodes) > 0:
n, parent = nodes.pop(0)
if width == 0:
depth += 1
width = next_width
next_width = 0
width -= 1
if len(n.children) > 0:
next_width+= len(n.children)
nodes.extend([[child, n] for child in n.children])
p = depth*2
print(f"{'':^{p}} Parent {parent.feature} : {parent.decision}")
print(f"{'':^{p}}{n}")
return
def test_tree_build_one_level_perfect_gain(self):
#Build tree to test.
data = np.asarray([
['sun', 'sun', 'sun', 'cloud', 'cloud'],
['go_outside', 'go_outside', 'go_outside', 'stay_indoors', 'stay_indoors']
])
data = data.T
root = ID3(6,6,data)
self.assertEqual(1, 1)
def test_simple_helper(self):
simple_d = np.array([
[0,1],
[0,1],
[1,0],
[1,0],
])
node = Node('root', 'root')
features = ['wind', 'label']
root = ID3(1,1,simple_d)
self.print_tree(root)
self.assertEqual(1,1)
def test_tree_two_level_imperfect_gain(self):
test_w_data = np.asarray([
['w','C','H',0],
['w','C','L',0],
['w','C','L',0],
['w','H','L',0],
['w','H','L',1],
['d','H','H',1],
['d','H','H',1],
['d','H','H',1],
['d','C','H',1],
['d','C','L',0],
])
root = ID3(2,3,test_w_data)
self.print_tree(root)
self.assertEqual(1,1)
if __name__ == '__main__':
#Testing functions
# unittest.main()
data_fn = 'data\house-votes-84.data'
names = [
'Class Name',
'handicapped-infants',
'water-project-cost-sharing',
'adoption-of-the-budget-resolution',
'physician-fee-freeze',
'el-salvador-aid',
'religious-groups-in-schools',
'anti-satellite-test-ban',
'aid-to-nicaraguan-contras',
'mx-missile',
'immigration',
'synfuels-corporation-cutback',
'education-spending',
'superfund-right-to-sue',
'crime',
'duty-free-exports',
'export-administration-act-south-africa',
]
names = [ name.replace(' ', '_').lower() for name in names]
data = pd.read_csv(data_fn,names=names )
data = data.replace('?', np.nan)
data = data.fillna(method='pad')
data = data.fillna('y')
n = names.pop(0)
names.append(n)
train, test = split_data(data.values, .7)
root = ID3(3,3,train, names)
print_tree(root) | [
"[email protected]"
] | |
a9c818e98a689655c4b0863e803ddf4d6e232f30 | d42dea822871be6027fadbf8b167be1c0b38d9c7 | /LinkedList/tempCodeRunnerFile.py | 5dd125a654093152da2a6c61508fa4529b8df816 | [] | no_license | siddhantprateek/Python-in-Practice | d8412c46dec57d512d8abd87cb0a33b71070c5ee | 0ad806f02fecb87de20078ef956f8e23bb38e342 | refs/heads/main | 2023-06-26T22:34:55.172882 | 2021-07-29T15:14:09 | 2021-07-29T15:14:09 | 354,875,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py |
permute("321") | [
"[email protected]"
] | |
20f71d3a82d7b30d19d33379d24bd499c1aaa086 | 3a53bcfa6bc3dd000e81946b483a838003991815 | /projects/migrations/0111_auto_20210610_1427.py | 9137bf2a24cc98808743983fb6b2f91fb3b2e035 | [
"MIT"
] | permissive | City-of-Helsinki/kaavapino | f15f221d89a0ba18f9ebca203afc6c1160a80f19 | d74d15c6bf565e8ea5f7a1221627d76f7459cb81 | refs/heads/development | 2023-08-26T14:16:03.980268 | 2023-08-25T11:07:10 | 2023-08-25T11:07:10 | 113,177,939 | 4 | 12 | MIT | 2023-08-30T12:52:21 | 2017-12-05T12:14:15 | Python | UTF-8 | Python | false | false | 608 | py | # Generated by Django 2.2.13 on 2021-06-10 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0110_ad_integration_model_fields'),
]
operations = [
migrations.AlterField(
model_name='attribute',
name='ad_data_key',
field=models.CharField(blank=True, choices=[('id', 'id'), ('name', 'name'), ('phone', 'phone'), ('email', 'email'), ('title', 'title'), ('office', 'office'), ('company', 'company')], max_length=7, null=True, verbose_name='AD user data key'),
),
]
| [
"[email protected]"
] | |
f79fd6689615bef49bb77d7471a18bbaf4a276e1 | 1da61d4a09ac9407b08a75377a1c506aee9dfd46 | /python/cfgmdl/unit.py | 71b5f461c579b1d769816bf12ebc7c3d9537cf7b | [
"BSD-3-Clause"
] | permissive | KIPAC/cfgmdl | 4e1db8eb9f07a0e676ed135454399e124f568786 | ea2903b51594ca1102f812c73ad77228fe51cc00 | refs/heads/master | 2023-03-22T12:05:53.344198 | 2021-03-03T21:51:08 | 2021-03-03T21:51:08 | 339,553,394 | 0 | 1 | BSD-3-Clause | 2021-03-03T21:51:09 | 2021-02-16T22:55:10 | Python | UTF-8 | Python | false | false | 1,242 | py | """Small module for unit converision"""
import numpy as np
class Unit:
"""
Object for handling unit conversions
"""
to_SI_dict = {}
def __init__(self, unit=None):
# Dictionary of SI unit conversions
# Check that passed unit is available
if unit is None:
self._SI = 1.
self._name = ''
return
if isinstance(unit, str):
if unit not in self.to_SI_dict:
raise KeyError("Passed unit '%s' not understood by Unit object" % (unit))
self._SI = self.to_SI_dict[unit]
self._name = unit
return
self._SI = float(unit)
self._name = "a.u."
@property
def name(self):
"""Return the units name"""
return self._name
def __call__(self, val):
"""Convert value to SI unit """
if val is None:
return None
return np.array(val) * self._SI
def inverse(self, val):
"""Convert value from SI unit """
if val is None:
return None
return np.array(val) / self._SI
@classmethod
def update(cls, a_dict):
"""Update the mapping of unit names"""
cls.to_SI_dict.update(a_dict)
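# Minimal usage sketch; the unit names below are hypothetical examples and are
# not registered anywhere in this module by default.
if __name__ == "__main__":
    Unit.update({"km": 1000.0, "m": 1.0})
    km = Unit("km")
    print(km(2.5))             # 2500.0 (2.5 km expressed in SI metres)
    print(km.inverse(2500.0))  # 2.5 (back to kilometres)
    print(Unit(0.5).name)      # 'a.u.' (plain numbers become arbitrary-unit scale factors)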
| [
"[email protected]"
] | |
7c788e89c7f8e94cb8a65f127108617060777304 | 20d040a1a04d0a8cae892c8f90649cdf9101d512 | /RS_Fusion_Exp/Model/label/createLabel3.py | 4b5aadbc9585e8a31d8e7c205056a7edf27568d5 | [] | no_license | PotentialPie/RS_Fusion_exp | add4634d526049ebfc56e51e314fe0a9839100fc | 6c3a9b3f9c7d6d5834b8115223cae06ae8e016ae | refs/heads/master | 2022-03-27T16:58:14.104059 | 2020-01-13T08:58:49 | 2020-01-13T08:58:49 | 233,552,876 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,880 | py | import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
'''
255 255 255; % Background
0 0 0; % Roads
100 100 100; % Buildings
0 125 0; % Trees
0 255 0; % Grass
150 80 0; % Bare Soil
0 0 150; % Water
255 255 0; % Railways
150 150 255]; % Swimming Pools
'''
# img = Image.open('zh1_GT.jpg')
def get_label(arr):
    # The RGB order read back seems to be reversed???
if (arr == [0, 0, 0]).all():
return 1
if (arr == [100, 100, 100]).all():
return 2
if (arr == [0, 125, 0]).all():
return 3
if (arr == [0, 255, 0]).all():
return 4
if (arr == [0, 80, 150]).all():
return 5
if (arr == [150, 0, 0]).all():
return 6
if (arr == [0, 255, 255]).all():
return 7
if (arr == [255, 150, 150]).all():
return 8
raise ValueError("存在其他类")
def mark_white(_w, _h):
iswhite[_w, _h] = 1
# print("{},{} is 白色".format(_w, _h))
point_dict['{}-{}'.format(_w, _h)] = -1
def is_white(_w, _h):
if (img_array[_w, _h, :] == [255, 255, 255]).all():
return True
else:
return False
def create_box(_w, _h):
global count
# 新建框
label_dict[count] = get_label(img_array[_w, _h, :])
point_dict['{}-{}'.format(_w, _h)] = count
# print('{}-{} is {} {}'.format(_w, _h, count, img_array[_w, _h, :]))
results.append([])
results[count].append([_w, _h])
count += 1
def point_add_box(_w, _h, dire):
# print(point_dict)
if dire == 'up':
_count = point_dict['{}-{}'.format(_w, _h - 1)]
point_dict['{}-{}'.format(_w, _h)] = _count
# print('{}-{} is {} {}'.format(_w, _h, count, img_array[_w, _h, :]))
results[_count].append([_w, _h])
elif dire == 'left':
_count = point_dict['{}-{}'.format(_w - 1, _h)]
point_dict['{}-{}'.format(_w, _h)] = _count
# print('{}-{} is {} {}'.format(_w, _h, count, img_array[_w, _h, :]))
results[_count].append([_w, _h])
elif dire == 'all':
_count_up = point_dict['{}-{}'.format(_w, _h - 1)]
_count_left = point_dict['{}-{}'.format(_w - 1, _h)]
point_dict['{}-{}'.format(_w, _h)] = _count_up
if _count_up == _count_left:
# 框也一样
results[_count_up].append([_w, _h])
else:
results[_count_up] += results[_count_left]
results[_count_up].append([_w, _h])
for point in results[_count_left]:
point_dict['{}-{}'.format(point[0], point[1])] = _count_up
# print('{}-{} is {} {}'.format(point[0], point[1], count, img_array[_w, _h, :]))
drop_index.append(_count_left)
def same_as(dire, _w, _h):
if dire == 'up':
if (img_array[_w, _h, :] == img_array[_w, _h - 1, :]).all():
return True
else:
return False
if dire == 'left':
if (img_array[_w, _h, :] == img_array[_w - 1, _h, :]).all():
return True
else:
return False
list = os.listdir('Zurich_dataset_v1.0/groundtruth')
for i,filename in enumerate(list):
file = filename[:-4]
filename = 'Zurich_dataset_v1.0/groundtruth/' + filename
img = cv2.imread(filename, -1)
img_array = np.array(img) # [:300, :300, :]
# print(np.array(img_array))
width = img_array.shape[0]
height = img_array.shape[1]
# print(img_array.shape)
count = 0 # 接下来是第几个框
label_dict = {} # 第几个框 count : 颜色RGB
point_dict = {} # 点坐标{x},{y}: 属于哪个框 -1 白色
results = [] # 哪个框的所有坐标
drop_index = []
iswhite = np.zeros((width, height)) # 是否是白色
for h in range(height):
for w in range(width):
print("{}/{} {}/{}".format(i+1,len(list),h * width + w, height * width))
if is_white(w, h):
mark_white(w, h)
continue
if w == 0 and h == 0:
create_box(w, h)
continue
if h == 0:
if same_as('left', w, h):
point_add_box(w, h, 'left')
else:
create_box(w, h)
continue
if w == 0:
if same_as('up', w, h):
point_add_box(w, h, 'up')
else:
create_box(w, h)
continue
if not same_as('up', w, h) and not same_as('left', w, h):
                # different from both the pixel above and the pixel to the left
                # print("different from both")
create_box(w, h)
elif same_as('up', w, h) and same_as('left', w, h):
# print("都一样")
# 都一样
point_add_box(w, h, 'all')
elif same_as('left', w, h):
# print("和左一样")
point_add_box(w, h, 'left')
elif same_as('up', w, h):
# print("和上一样")
point_add_box(w, h, 'up')
fig = plt.figure(figsize=(height / 100, width / 100))
ax = fig.add_subplot(111)
ax.imshow(img_array)
for i in range(count):
if i in drop_index:
continue
minx, miny = np.min(results[i], 0)
maxx, maxy = np.max(results[i], 0)
label = label_dict[i]
# x y 相反
rect = plt.Rectangle((miny, minx), (maxy - miny), (maxx - minx), fill=False, edgecolor='r')
ax.add_patch(rect)
# with open('../data/{}_bounding_box.txt'.format(file), 'a', encoding='utf-8') as f:
# f.write('{} {} {} {} {}\n'.format(minx, miny, maxx, maxy, label))
ax.set_xticks([])
ax.set_yticks([])
print("保存")
plt.savefig("result_{}.jpg".format(file))
plt.close('all')
| [
"[email protected]"
] | |
b1825f35c41ea617fb138ee56dd1a1879fc74f92 | 199395a506266cbdd1d6aba5fb2287fb6fb1d127 | /src/customer.py | 45c3df1b555411380f39d778424c2fb9b0b22326 | [] | no_license | heathercking/pub.py | 436569bfead928daf5a0bd43b90ba7a1f098cefc | 27d5add08219f4c40350b67daab416885a9fe8b5 | refs/heads/master | 2023-08-16T03:28:09.229600 | 2021-10-20T14:53:42 | 2021-10-20T14:53:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | class Customer:
def __init__(self, name, wallet):
self.name = name
self.wallet = wallet
def reduce_cash_from_wallet(self, amount):
self.wallet -= amount | [
"[email protected]"
] | |
84d207972b9e649fd138701b12838984cbc71550 | 80e1179f3cd9a356784223086d2bdadc6dd139d4 | /Files/st_feedback_query_auto.py | b108e4cb0927802955a27f7ae027656cb0f5d57f | [] | no_license | vivek3141/IntellitrakFeedbackTracker | 68eb5c6a963bd78481b77edc58823402a121d6c5 | fb92f1556a3ad270d2b5a47fed2d5671a354bab0 | refs/heads/master | 2021-08-30T12:32:16.951094 | 2017-12-18T00:22:58 | 2017-12-18T00:22:58 | 114,575,812 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,625 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'st_feedback_query.ui'
#
# Created: Tue Aug 2 15:15:23 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1110, 512)
MainWindow.setStyleSheet(_fromUtf8("background-color: rgb(255, 166, 57);"))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.tableView = QtGui.QTableView(self.centralwidget)
self.tableView.setGeometry(QtCore.QRect(10, 80, 1091, 371))
self.tableView.setStyleSheet(_fromUtf8("\n"
"background-color: rgb(255, 255, 255);"))
self.tableView.setObjectName(_fromUtf8("tableView"))
self.comboBox = QtGui.QComboBox(self.centralwidget)
self.comboBox.setGeometry(QtCore.QRect(10, 40, 241, 22))
self.comboBox.setStyleSheet(_fromUtf8("background-color: rgb(255, 255, 255);"))
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.pushButton_5 = QtGui.QPushButton(self.centralwidget)
self.pushButton_5.setGeometry(QtCore.QRect(1010, 20, 91, 51))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Berlin Sans FB"))
font.setPointSize(16)
font.setBold(False)
font.setWeight(50)
self.pushButton_5.setFont(font)
self.pushButton_5.setStyleSheet(_fromUtf8("background-color: rgb(255, 255, 255);"))
self.pushButton_5.setObjectName(_fromUtf8("pushButton_5"))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 10, 211, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Berlin Sans FB Demi"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setStyleSheet(_fromUtf8("color: rgb(255, 255, 255);"))
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(260, 10, 691, 61))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.cbx_helpme = QtGui.QCheckBox(self.verticalLayoutWidget)
self.cbx_helpme.setObjectName(_fromUtf8("cbx_helpme"))
self.verticalLayout.addWidget(self.cbx_helpme)
self.label_2 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.label_3 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(301, 471, 291, 23))
self.pushButton.setStyleSheet(_fromUtf8("background-color: rgb(255, 255, 255);"))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_4 = QtGui.QPushButton(self.centralwidget)
self.pushButton_4.setGeometry(QtCore.QRect(609, 471, 251, 23))
self.pushButton_4.setStyleSheet(_fromUtf8("background-color: rgb(255, 255, 255);"))
self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.pushButton_4, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Modify Feedback", None))
self.comboBox.setItemText(0, _translate("MainWindow", "All", None))
self.pushButton_5.setText(_translate("MainWindow", "Q [F10]", None))
self.label.setText(_translate("MainWindow", "Course", None))
self.cbx_helpme.setText(_translate("MainWindow", "Help Me with this window", None))
self.label_2.setText(_translate("MainWindow", "First Select the row by clicking on the number at the left of the row. Next press F1 or Select to edit the Feedback in a new form.", None))
self.label_3.setText(_translate("MainWindow", "Course Filter can be used to filter the rows in the below list by course names. Initially all courses are selected.Select the course and press F10", None))
self.pushButton.setText(_translate("MainWindow", "Select [F1]", None))
self.pushButton_4.setText(_translate("MainWindow", "Close", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
5d250d65c461d6fb64cf3e41e70a0c70796a948b | 9b442c80c497b29a67fbf13ae1e1c10f283175b0 | /src/main.py | 7a0051943a872c26a267ac80f6bd4a6c0fb8ec84 | [
"MIT"
] | permissive | lpsantil/TYOS | 0be1b8046fd2a7a1134491eaea363f958431a8b5 | 488579747d8e0b98920adb5c8c227348359969fa | refs/heads/master | 2021-10-02T17:58:14.367466 | 2018-11-29T22:39:25 | 2018-11-29T22:39:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,758 | py | #main.py
#Copyright (c) 2015 Tyler Spadgenske
#MIT License
'''
Usage:
If FONA is powered off, run sudo python /home/pi/tyos/src/main.py --power to turn module on and start TYOS.
If FONA is already on, just run sudo python /home/pi/tyos/src/main.py
Upgrade:
To check for updates go to https://github.com/spadgenske/TYOS/releases/latest and compare the version number with your
current version of TYOS. If higher, you can update. To get your version of TYOS run the command
sudo python /home/pi/tyos/src/main.py --version
'''
VERSION = '0.5.6'
#Set to True if you do not want the time modified off the FONA
USE_RAW_TIME = False
import pygame, sys, os, time, datetime, traceback, warnings
from pygame.locals import *
import framebuffer, toolbar, apps, serialport, receive
class tyos():
def __init__(self):
warnings.filterwarnings("ignore")
for arg in sys.argv:
if arg == '--power':
self.POWER_FONA = True
print 'Powering FONA on...'
else:
self.POWER_FONA = False
if arg == '--version':
print 'TYOS VERSION ' + VERSION
sys.exit()
self.VERSION = VERSION
if self.POWER_FONA:
import power
power.Power().toggle()
time.sleep(10)
#Setup fona
self.fona = serialport.SerialPort()
self.fona.connect()
self.set_audio()
#Setup some important objects
self.scope = framebuffer.pyscope()
self.toolbar = toolbar.Toolbar(self.fona)
self.apps = apps.App(self.fona)
self.reciever = receive.Receive(self.fona)
pygame.init()
#Setup surface
self.WINDOWWIDTH = 320
self.WINDOWHIEGHT = 480
self.surface = pygame.display.set_mode((self.WINDOWWIDTH, self.WINDOWHIEGHT), pygame.FULLSCREEN)
pygame.mouse.set_visible(False)
self.clock = pygame.time.Clock()
#Colors R G B
self.BLUE = ( 0, 0,255)
self.WHITE = (255,255,255)
self.BLACK = ( 0, 0, 0)
self.surface.fill(self.WHITE)
self.update = True
#Setup logo
self.logo = pygame.image.load('/home/pi/tyos/graphics/logo.png')
self.logo_rect = self.logo.get_rect()
self.logo_rect.y = self.surface.get_rect().centery - 50
self.logo_rect.centerx = self.surface.get_rect().centerx
#Setup Battery Icon
self.bat = pygame.image.load('/home/pi/tyos/graphics/bat.png')
self.bat_rect = self.bat.get_rect()
self.bat_rect.centery = 15
self.bat_rect.right = self.WINDOWWIDTH - 10
#Setup Low Battery Icon
self.low_bat = pygame.image.load('/home/pi/tyos/graphics/low_bat.png')
self.low_bat_rect = self.low_bat.get_rect()
self.low_bat_rect.centery = 380
self.low_bat_rect.centerx = self.surface.get_rect().centerx
#Setup App Toolbar
self.app_toolbar = pygame.Rect(0, 0, 320, 30)
#Rectangle Dictionary
self.rectangles = {'rects':[self.app_toolbar], 'colors':[self.BLACK]}
#Reception Rectangle dictionary
self.reception_bars = {'rects':[], 'colors':[]}
#Battery Left Text
self.bat_left = {'surface':self.toolbar.bat_left, 'rects':self.toolbar.bat_left_rect}
#Setup fonts
self.font = pygame.font.Font('/home/pi/tyos/fonts/liberation_sans.ttf', 20)
#Setup clock Text
self.clock_text = self.font.render('12:00', True, self.WHITE, self.BLACK)
self.clock_text_rect = self.clock_text.get_rect()
self.clock_text_rect.centerx = self.surface.get_rect().centerx
self.clock_text_rect.centery = 15
#Image Dictionary
self.images = {'surfaces':[self.bat], 'rects':[self.bat_rect, self.clock_text_rect]}
self.blit_logo = True
self.dead_bat = False
def set_audio(self):
#Set audio in/out to selected from config file
try: #See if config file exists
self.audio_file = open('/home/pi/tyos/configure/audio.conf', 'r')
except:
if not os.path.exists('/home/pi/tyos/configure'):#If configure directory doesn't exist, create one
os.mkdir('/home/pi/tyos/configure')
self.audio_file = open('/home/pi/tyos/configure/audio.conf', 'w+')#Create config file and add some lines
self.audio_file.write('#Audio config file\n')
self.audio_file.write('mode=1\n')
self.audio_file.close()
self.audio_file = open('/home/pi/tyos/configure/audio.conf', 'r')
file = self.audio_file.readlines()
for i in range(0, len(file)):#Parse file
if file[i][0] == '#':
pass
#Do Nothing. Line is comment
else:
file[i] = file[i].rstrip()
if 'mode' in file[i]: #Extract audio mode: 1=Built in, 0=External
mode = file[i]
mode = mode.split('=')
mode = mode[1]
self.fona.transmit('AT+CHFA=' + mode)
def blit_time(self):
#Convert to 12 hour time then blit it to surface
t = time.strftime("%H:%M")
if USE_RAW_TIME == False:
if int(t[0] + t[1]) > 12:
t = str(int(t[0] + t[1]) - 12) + t[-3:]
t = t.lstrip('0')
self.clock_text = self.font.render(t, True, self.WHITE, self.BLACK)
self.surface.blit(self.clock_text, self.images['rects'][1])
def home(self):
while True:
#handle events and clock
self.blit_time()
self.handle_events()
pygame.display.update()
self.clock.tick()
#Update battery and reception
self.reception_bars, self.bat_left, self.update, self.dead_bat = self.toolbar.clock(self.reception_bars, self.bat_left,
self.update, self.apps.pixel)
#Move images if necessary
self.update, self.images, self.rectangles, self.reception_bars, self.bat_left = self.apps.open(self.update, self.images,
self.rectangles, self.reception_bars,
self.bat_left)
#Open app if tapped
self.apps.open_app()
#Check for calls and sms
self.update = self.reciever.check(self.update)
#Close app if opened and call coming in
if self.reciever.call_coming:
self.apps.app_to_open = None
self.apps.blit_logo = True
#Update if necessary
if self.update:
self.blit(self.images, self.rectangles, self.reception_bars, self.bat_left)
self.update = False
def blit(self, surfaces, rects, reception, bat):
self.surface.fill(self.WHITE)
if self.apps.app_to_open != None:
self.blit_logo = False
#Blit images using one image but different rectangles
for i in self.apps.app_objects[self.apps.app_to_open].blit_one_surface['rects']:
self.surface.blit(self.apps.app_objects[self.apps.app_to_open].blit_one_surface['surface'], i)
#Blit images using multiple images and rectangles
for rect, surface in zip(self.apps.app_objects[self.apps.app_to_open].blit['rects'],
self.apps.app_objects[self.apps.app_to_open].blit['surfaces']):
self.surface.blit(surface, rect)
#Blit all rectangles
for rect, color in zip(rects['rects'], rects['colors']):
pygame.draw.rect(self.surface, color, rect)
#Blit all reception bars
for rect, color in zip(reception['rects'], reception['colors']):
pygame.draw.rect(self.surface, color, rect)
#Blit all images
for surface, rect in zip(surfaces['surfaces'], surfaces['rects']):
self.surface.blit(surface, rect)
#Blit battery Percentage
self.surface.blit(bat['surface'], bat['rects'])
#Blit logo
if self.apps.blit_logo:
self.surface.blit(self.logo, self.logo_rect)
if self.dead_bat:
self.surface.blit(self.low_bat, self.low_bat_rect)
if self.apps.logos['rects'][0].y != -50:
for surface, rect in zip(self.apps.logos['surfaces'], self.apps.logos['rects']):
self.surface.blit(surface, rect)
#Blit incoming call
if self.reciever.call_coming:
for surface, rect in zip(self.reciever.blit['surfaces'], self.reciever.blit['rects']):
self.surface.blit(surface, rect)
def handle_events(self):
for event in pygame.event.get():
self.update = True
self.apps.update_app = True
self.app_bar = self.apps.check(event)
if self.reciever.call_coming:
self.reciever.get_events(event)
phone = tyos()
try:
phone.home() #E.T Reference
except KeyboardInterrupt:
print
print 'Closing TYOS ' + phone.VERSION
if phone.POWER_FONA:
power.Power().toggle()
pygame.quit()
sys.exit()
except SystemExit:
pass
except:
print '******************************************'
print 'An Error Occured'
print 'Writing to log /home/pi/tyos/logs/tyos.log'
print '******************************************'
#If error occurs, save it to file
error = traceback.format_exc()
error_log = open('/home/pi/tyos/logs/tyos.log', 'w')
error_log.write(error)
| [
"[email protected]"
] | |
8db9c19422cd0fe35aecf9f8453a618861cc0cac | a6a8f68673e6f4b75dc66195c7c2d49ddce91e78 | /p10.py | a9558c8e645487d80ef592f0857eae341b8a1e2b | [] | no_license | ValerianClerc/project-euler | c4d8d6bf40ac892312e995880bfaf242743baa21 | d83bc741ff217cbc7f49b988bcce2760b2fe9844 | refs/heads/master | 2020-03-22T16:46:27.700892 | 2018-07-22T22:27:33 | 2018-07-22T22:27:33 | 140,350,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py |
def sieve():
n = 2000001
dict = {}
for i in range(2, n):
dict[i] = "u"
for p in range(2, n):
count = p
mult = 2
while count < n:
count = mult*p
dict[count] = "m"
mult = mult +1
ans = []
for i in range(2, n):
if(dict[i] == "u"):
ans.append(i)
print(sum(ans))
sieve()
# def sieve2():
# n = 2000001
# list = []
# for i in range(2, n):
# list.append(True)
# for p in range(2, n):
# count = p
# mult = 2
# while count < n:
# count = mult*p
# list[count] = False
# mult = mult + 1
# ans = []
# for i in range(2,n):
# if(list[i]):
# ans.append(i)
# print(sum(ans))
#
# sieve2()
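# Alternative sketch for comparison (not called anywhere, like sieve2 above):
# a conventional boolean-array Sieve of Eratosthenes over the same range.
def sieve_bool_array():
    n = 2000001
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # strike out every multiple of p, starting from p*p
            for multiple in range(p * p, n, p):
                is_prime[multiple] = False
    # sum of all primes below two million; expected to print 142913828922
    print(sum(i for i, prime in enumerate(is_prime) if prime))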
| [
"[email protected]"
] | |
889cabb901e4edb3cf0c85083b58b1a30ccd1b95 | 230f757be00fddd13856727f9145675ea526f733 | /hello-world.py | a9e088e30f2fe75d421a8909accdce5d20cb3415 | [
"MIT"
] | permissive | khangnngo12/astr-119 | b425774641f7a018fe687f7aa1d9e34a255cb95a | 2728ae9b112f522bcafcd82ff151292d892a3097 | refs/heads/main | 2022-12-21T07:51:33.025197 | 2020-10-05T19:46:23 | 2020-10-05T19:46:23 | 300,378,934 | 0 | 0 | MIT | 2020-10-05T19:46:24 | 2020-10-01T18:14:33 | Python | UTF-8 | Python | false | false | 85 | py | #!/usr/bin/env python3
#this program will write
#Hello World!
print("Hello World!") | [
"[email protected]"
] | |
ffda1497cfd67bc0111688811bc86deb263485c3 | acd9166b0780d01045d055d0b187138deac53457 | /train.py | 423b34e420eb94585c1ecb63d11843fe90555b38 | [] | no_license | xiekuncn/Behavioral-Cloning | 41bbda4daebc8f96d08cce1932570b7b8837c10e | 17cd73db481620e5d99637c75c95ed0ebf2b3003 | refs/heads/master | 2021-01-19T19:32:06.794252 | 2017-04-17T15:22:37 | 2017-04-17T15:22:37 | 88,420,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,300 | py | #!/usr/bin/env python3
# _*_coding=utf8_*_
import os
import csv
from sklearn.model_selection import train_test_split
import cv2
import numpy as np
import sklearn
from keras.layers import Dense, Flatten, Lambda, Dropout, Cropping2D
from keras.layers.convolutional import Conv2D
import tensorflow as tf
from keras.models import Sequential
import matplotlib.pyplot as plt
import keras
def train(data_paths):
'''
    Train the model on the data provided by data_paths.
    :param data_paths: a list of dataset folders.
    The code will load the 'driving_log.csv' from each input folder,
    and this csv file should reference the images by relative path, *NOT absolute path*.
    If an image does not exist, a FileNotFoundError is raised before training.
'''
batch_size = 128
if type(data_paths) is str:
data_paths = [data_paths]
train_samples, validation_samples = [], []
for data_path in data_paths:
train_data, validation_data = make_samples(data_path)
train_samples.extend(train_data)
validation_samples.extend(validation_data)
train_generator = generator(train_samples, batch_size)
validation_generator = generator(validation_samples, batch_size)
ch, row, col = 3, 160, 320 # Trimmed image format
model = Sequential()
model.add(Cropping2D(cropping=((40, 25), (0, 0)), input_shape=(row, col, ch)))
model.add(Lambda(lambda x: keras.layers.core.K.tf.image.resize_images(x, (66, 200)))) # resize image
model.add(Lambda(lambda x: x / 127.5 - 1.,
input_shape=(66, 200, 3),
output_shape=(66, 200, 3)))
model.add(Conv2D(kernel_size=(5, 5), filters=24, padding='valid', activation='relu', strides=(2, 2), use_bias=True))
model.add(Conv2D(kernel_size=(5, 5), filters=36, padding='valid', activation='relu', strides=(2, 2), use_bias=True))
model.add(Conv2D(kernel_size=(5, 5), filters=48, padding='valid', activation='relu', strides=(2, 2), use_bias=True))
model.add(Conv2D(kernel_size=(3, 3), filters=64, padding='valid', activation='relu', strides=(1, 1), use_bias=True))
model.add(Conv2D(kernel_size=(3, 3), filters=64, padding='valid', activation='relu', strides=(1, 1), use_bias=True))
model.add(Flatten())
model.add(Dense(1164, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='tanh'))
model.summary()
model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator,
steps_per_epoch=len(train_samples),
validation_data=validation_generator,
validation_steps=len(validation_samples),
epochs=3)
model.save("model.h5")
print(history_object.history.keys())
### plot the training and validation loss for each epoch
# plt.plot(history_object.history['loss'])
# plt.plot(history_object.history['val_loss'])
# plt.title('model mean squared error loss')
# plt.ylabel('mean squared error loss')
# plt.xlabel('epoch')
# plt.legend(['training set', 'validation set'], loc='upper right')
# plt.show()
def make_samples(data_path):
'''
    Make samples from the data_path folder
    and return the training and validation samples split 4:1.
    The data is augmented by flipping; the flip is stored as an attribute of each sample.
    :param data_path: a folder that contains a 'driving_log.csv' file.
    :return training_samples, validation_samples.
'''
samples = []
csv_file = os.path.join(data_path, "driving_log.csv")
skip_line = True
with open(csv_file) as f:
reader = csv.reader(f)
for line in reader:
if skip_line:
skip_line = False
continue
for image_index in range(3):
path = "".join(line[image_index].split())
path = os.path.join(data_path, path)
if not os.path.exists(path):
raise FileNotFoundError(path)
line[image_index] = ''.join(path.split())
angle = float(line[3])
if image_index == 1:
angle += 0.229
elif image_index == 2:
angle -= 0.229
samples.append({"image": line[image_index],
"angle": angle,
"flip": False})
samples.append({"image": line[image_index],
"angle": angle,
"flip": True})
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
return train_samples, validation_samples
def generator(samples, batch_size=128):
'''
it's a generator for sampling.
:param samples: the whole datasets for training or validation
:param batch_size: batch size
:return: yield a batch sample
'''
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
sklearn.utils.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
images = []
angles = []
for batch_sample in batch_samples:
name = batch_sample["image"]
image = cv2.imread(name)
if image is None or image.shape != (160, 320, 3):
continue
angle = float(batch_sample["angle"])
if batch_sample["flip"]:
images.append(np.fliplr(image))
angles.append(-angle)
else:
images.append(image)
angles.append(angle)
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
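# Illustrative usage sketch: the generator loops forever and yields shuffled
# (images, angles) batches, which is what fit_generator in train() consumes:
#   train_gen = generator(train_samples, batch_size=128)
#   X_batch, y_batch = next(train_gen)  # X_batch: up to 128 frames of 160x320x3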
if __name__ == "__main__":
train(['/dataset/pj3-1/', '/dataset/pj3-2/', '/dataset/pj3-origin/'])
| [
"[email protected]"
] | |
d4b7a0a364fdc667c3582d44384e6f48a247b0fe | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03464/s336171115.py | 4f42109ae769c6dfb2831e74579d5c8aceb6e5c6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | #!/usr/bin/env python3
import sys, math, copy
# import fractions, itertools
# import numpy as np
# import scipy
HUGE = 2147483647
HUGEL = 9223372036854775807
ABC = "abcdefghijklmnopqrstuvwxyz"
def get_max_min_mod(a, minp, maxp):
if minp % a == 0:
minp_new = (minp // a) * a
else:
minp_new = (minp // a + 1) * a
maxp_new = (maxp // a) * a
return minp_new, maxp_new
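# Worked example for illustration: get_max_min_mod(3, 4, 10) rounds the lower
# bound up and the upper bound down to multiples of 3, returning (6, 9).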
def main():
k = int(input())
ak = list(map(int, reversed(input().split())))
assert len(ak) == k
minp = maxp = 2
for j in range(k):
minp, maxp = get_max_min_mod(ak[j], minp, maxp)
if minp > maxp:
print(-1)
sys.exit(0)
maxp += ak[j] - 1
print(minp, maxp)
main()
| [
"[email protected]"
] | |
691b805c06257ec4860f40bf70e4d6899007590d | 7c5d3246d866379dc7b6a7152ac61ae5a18b067c | /Figure_2.py | 4b3e2eb844ee743bceedc1b0dd130c984302ced0 | [
"Apache-2.0"
] | permissive | sunatthegilddotcom/Data-Driven-PV | c601a3c66a81cedf64b652393419a137cebf8b51 | 4f06d861948de4088131ef72fbab80521140cd8a | refs/heads/master | 2022-11-24T15:51:30.779365 | 2020-07-30T08:11:05 | 2020-07-30T08:11:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,155 | py | # -*- coding: utf-8 -*-
#Load libraries
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from scipy.stats import lognorm
# Specify font used in plots
font = 'Adobe Myungjo Std'
math_font = 'cm'
#Cutoff function
def voef(cut, slope, eff):
if eff < cut:
return 0
else:
return (eff*slope)
def voef_exp(cut, slope, eff, exp):
if eff < cut:
return 0
else:
return (slope * np.power(eff,exp))
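# Worked example for illustration: with a cutoff of 10 (% efficiency) and
# slope k = 5, a 12 %-efficient cell earns voef(10, 5, 12) = 60, while an
# 8 % cell falls below the cutoff and voef(10, 5, 8) returns 0.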
def make_plots(efficiencies, share_max, axs=False):
cuts = np.linspace(8, 13, 100)
slopes = np.linspace(0.5, 10, 100)
distributions = []
total_rev =[]
distributions_exp = []
total_rev_exp =[]
for slope in slopes:
for cut in cuts:
#Compute revenue
rev = np.zeros((len(efficiencies),1))
cut = cut
slope = slope
for i, sample in enumerate(efficiencies):
rev[i] = voef(cut, slope, sample)
distributions.append(rev)
total_rev.append([np.sum(rev)/len(efficiencies), cut, slope])
total_rev_def = pd.DataFrame(total_rev, columns = ['Total Revenue', 'n_c', 'k'])
cuts_n = cuts / np.mean(cuts)
X, Y = np.meshgrid(cuts_n, slopes)
#fig,ax = plt.subplots(1,1)
if axs:
ax = axs
z= total_rev_def['Total Revenue']
Z= z.values.reshape(100,100)
levels = np.linspace(0, share_max, 10)
cp = ax.contourf(X, Y, Z, cmap='viridis', levels = levels)
#cp.ax.set_title('$/m^2', size=20)
clb = plt.colorbar(cp, ax=axs, format='%.1f') # Add a colorbar to a plot
clb.ax.set_title(r'$\$ / m^2$', fontname=font)
#ax.set_title('Total Revenue', size=20)
ax.set_xlabel(r'Ratio $\eta_c / \eta _{mean}$', size=18, fontname=font)
ax.set_ylabel(r'$k$ [$\$ \: W^{-1} m^{-2}$]', size=18, fontname=font)
# Set the font name for axis tick labels
for tick in ax.get_xticklabels():
tick.set_fontname(font)
for tick in ax.get_yticklabels():
tick.set_fontname(font)
return total_rev_def, X, Y, Z
#%%
#LOAD DATA
JV_exp = np.loadtxt('perov_JV_exp.txt',delimiter=',')
JV_exp = JV_exp
v_sweep = np.linspace (0,1.2,100)
power_exp= JV_exp[:,100*2:100*3]*v_sweep
eff_exp = np.max(power_exp, axis=1)/0.98
exp_condition = pd.read_excel('prcess_label.xlsx',index_col=0)
exp_condition = exp_condition.values
#Stack data and order
X_data = np.concatenate([eff_exp.reshape(-1,1), exp_condition],axis= 1)
p_index = []
X_data_re=[]
for i in [70,90,110,130]:
for j in [2,4,8]:
idx = np.intersect1d(np.where(X_data[:,1]==i) ,np.where(X_data[:,2]==j))
X_data_re.append(X_data[idx,:])
X_data_re = np.vstack(X_data_re)
#Remove data to have same # of samples:
X_data_re = np.delete(X_data_re, [0,15,21,13,14,10,12,17,12,9,7,4], 0)
X_data_re = np.insert(X_data_re, 36, [3.88, 90, 2], axis=0)
X_data_re = np.delete(X_data_re, [106,107,108,96,110,112], 0)
X_data_re = np.insert(X_data_re, 143, [5.77, 130, 8], axis=0)
#make_plots(X_data_re)
#%%
# Histogram of data
#plt.hist(X_data_re[:,0],50)
# Compute efficiency and normalize
df_X1 = pd.DataFrame(X_data_re, columns=['Efficiency','Temperature','Ratio'])
df_X2 = df_X1.values
df_X2 = df_X2[df_X2[:,0]>2]
df_max = np.max(df_X2[:,0])
df_X2 = df_X2[:,0] / df_max
mean_zeroff = np.mean(df_X2)
std_zeroff = np.std(df_X2)
#plt.hist(df_X2, 50)
from scipy.stats import norm
logn_zero = norm(loc=mean_zeroff, scale = std_zeroff)
sample_zero = logn_zero.rvs(size=1500)
sample_zero = sample_zero[sample_zero < 20/ df_max]
z_i = np.random.randint(10, size=len(sample_zero))
z_i[z_i>0]=1
sample_z = z_i *(1-sample_zero)
sample_z [sample_z<0]=0
#plt.figure()
#plt.hist(sample_z * df_max,50)
#plt.axvline(x=np.mean(sample_zero)*df_max, color='r')
#plt.figure()
#plt.hist(X_data_re[:,0], 50)
#%%
#Lognormal
#logn = lognorm(s=std_zeroff, scale = (1-mean_zeroff)+ (std_zeroff**2 / 2))
#
#
#sample = logn.rvs(size=150000)
sample = np.random.lognormal(np.log(1-mean_zeroff), (std_zeroff), 1500 )
sample[sample>1]= 1
#plt.figure()
#plt.hist ((1-sample)*df_max,50)
mean = np.mean((1-sample))
#plt.axvline(x=mean*df_max, color='r')
#%%
#a = make_plots((sample_z * df_max), share_max = 80)
#b= make_plots(((1-sample)*df_max), share_max = 80)
#calc = []
#for i, sample in enumerate((1-sample)*df_max):
# calc.append(voef(9.5, 5, sample))
#
#res = np.sum(calc)
#%%
import matplotlib.style as style
#style.use('seaborn-white')
sns.set(style="white", context='talk')
fig, axes = plt.subplots(2, 2, figsize=(10,8))
import matplotlib
matplotlib.rcParams['font.family'] = font
matplotlib.rcParams['mathtext.fontset'] = math_font
#matplotlib.rcParams['font.size'] = 20
axes[0, 0].hist(sample_z * df_max, 50, density=True, color='mediumseagreen')
axes[0, 0].axvline(x=np.mean(sample_zero)*df_max, color='blue', linewidth=2)
#axes[0, 0].set_title('R&D Distribution', size=20)
axes[0, 0].set_xlabel(r'Solar Cell Efficiency $\eta$ [%]', size=18, fontname=font)
axes[0, 0].set_ylabel(r'Probability $p\:(\eta)$', size=18, fontname=font)
axes[0, 0].set_xlim(right=20)
axes[0, 1].hist((1-sample)*df_max, 50, density=True, color='mediumseagreen')
axes[0, 1].axvline(x=mean*df_max, color='blue', linewidth=2)
#axes[0, 1].set_title('Manufacturing Distribution', size=20)
axes[0, 1].set_xlabel(r'Solar Cell Efficiency $\eta$ [%]', size=18, fontname=font)
axes[0, 1].set_ylabel(r'Probability $p\:(\eta)$', size=18, fontname=font)
axes[0, 1].set_xlim(right=20)
# Set the font name for axis tick labels
for ax in axes.ravel():
for tick in ax.get_xticklabels():
tick.set_fontname(font)
for tick in ax.get_yticklabels():
tick.set_fontname(font)
make_plots((sample_z * df_max), share_max = 80, axs= axes[1,0])
make_plots(((1-sample)*df_max), share_max = 80, axs= axes[1,1])
fig.tight_layout()
plt.savefig('Fig2.png', dpi=1000)
#axes[1, 0].scatter(x, y)
#%%
#df_X = df_X1.copy()
#
#max_eff = df_X['Efficiency'].max()
#
## Normalize
#df_X['Efficiency'] = df_X['Efficiency'] / max_eff
#
## Get mean and variance for empirical distribution
#X_mean = df_X['Efficiency'].mean()
#eff_data = df_X['Efficiency']
#
#log_norm_var = eff_data.std()
#
#make_plots(X_data_re)
#plt.figure()
#plt.hist(X_data_re[:,0], 50)
#
#
#
##%%
## Lognormal distribution
#
#
#logn = lognorm(s=0.5*log_norm_var, scale = 0.5*(1-X_mean))
#sample = logn.rvs (size=1500)
#sample[sample>1]= 1
#plt.figure()
#plt.hist (1-sample,50)
#
##%%
#
##zero inflated lognormal
#logn_zero = norm(loc=0.5*log_norm_var, scale = 1.9*(1-X_mean))
#sample_zero = logn_zero.rvs(size=1500)
#
#z_i = np.random.randint(10, size=len(sample_zero))
#z_i[z_i>0]=1
#sample_z = z_i *(1-sample_zero)
#sample_z [sample_z<0]=0
#
#plt.figure()
#plt.hist(sample_z,50)
#
#
#
##%%
##Make data frames for plotting
#lognorm_df = 1-sample
#lognorm_df = lognorm_df.reshape(-1,1)
#lognorm_df = lognorm_df * max_eff
#a = make_plots(lognorm_df)
#
##Make data frames for plotting
#lognorm_zero = sample_z
#lognorm_zero = lognorm_zero.reshape(-1,1)
#lognorm_zero = lognorm_zero * max_eff
#b = make_plots(lognorm_zero)
#
#
#
##%%
#
#plt.figure()
#plt.hist(lognorm_df,50)
#plt.figure()
#plt.hist(lognorm_zero,50)
#
##%%
##
#
#
# sns.lineplot(x='k', y='Total Revenue', hue='n_c', data=total_rev_def, legend='full')
# for exponent in exponents:
# for cut in cuts:
# #Compute revenue
# rev_exp = np.zeros((len(efficiencies),1))
# cut = cut
# slope = fixed_slope
# exponent = exponent
#
# for i, sample in enumerate(efficiencies):
# rev_exp[i] = voef_exp(cut, slope, sample[0], exponent)
#
#
# distributions_exp.append(rev_exp)
# total_rev_exp.append([np.sum(rev_exp), cut, exponent])
#
# total_rev_def_exp = pd.DataFrame(total_rev_exp, columns = ['Total Revenue', 'n_c', 'Exp'])
#
# plt.figure()
# sns.lineplot(x='Exp', y='Total Revenue', hue='n_c', data=total_rev_def_exp, legend='full')
| [
"[email protected]"
] | |
84bb5d529654e19be0e8563ab99ea1631a73031d | 343ab5dde66ab0314acdc339c08973e62283631c | /home/urls.py | 0c523ced43ab2ff2fb122341228e826b4fbca388 | [] | no_license | pdolawat654/Hospital_Management | 45e400a9376aa3302ca500d20910859429417c5b | e9ee6fe9c6f6a659f69f0a70aaef875c73a8baf7 | refs/heads/master | 2022-10-18T19:08:37.741706 | 2020-06-12T12:35:42 | 2020-06-12T12:35:42 | 271,786,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from django.urls import path
from . import views
urlpatterns=[
path("",views.home,name='home'),
]
| [
"[email protected]"
] | |
bab8cf5bbb562bf33a113be171dba2455f98a23c | 198947a1082ff3855a76aabf48c057629ef2f3ef | /ex11.py | e9b25dc5d8be585da017f795fcf81e493912fb49 | [] | no_license | mskaru/LearnPythonHardWay | ec71a140bd6d7713629c97cf435b890679613d18 | 4838f2c38d2ceba954469363db1f98e8d2758a0f | refs/heads/master | 2016-08-04T13:26:09.818275 | 2014-02-09T10:14:36 | 2014-02-09T10:14:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # -*- coding: utf-8 -*-
print "How old are you?",
age = raw_input()
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
print int(age, base = 2) + int(height, base = 2)
| [
"[email protected]"
] | |
c69d3069c6f96a3344942716ceaee3ab21eb94f3 | 82ff359caee189eef3744f37dde84a81c8eadb60 | /rotate/test_mask.py | f705a5eafeede5814744f731ecca8710b46aec6d | [] | no_license | cycle13/Hagibis | a3e509f7dc4aafb9ff11de58dcce92c0443a2fca | 6c8237c75740070def1159ff6962ea0f4203136c | refs/heads/master | 2023-01-19T06:42:15.637110 | 2020-11-24T00:18:33 | 2020-11-24T00:18:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | from librotate import pi, dtheta, mask_lonlat, rotate_lonlat1d
import numpy as np
import matplotlib.pyplot as plt
lonc = 137.5
latc = 25.0
dcolat = 12.5
lon = np.arange(360)
lat = -90.0 + np.arange(180)
msk = mask_lonlat(dcolat, lonc, latc, lon, lat)
nmsk = np.sum(msk.astype(np.int))
print(nmsk)
mlon = np.zeros(nmsk)
mlat = np.zeros_like(mlon)
print(mlon.size)
k = 0
for j in range(lat.size):
for i in range(lon.size):
if msk[j, i]:
mlon[k] = lon[i]
mlat[k] = lat[j]
k += 1
print(mlon.min(),mlon.max())
print(mlat.min(),mlat.max())
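# rotate the masked points into coordinates centred on (lonc, latc); the final
# argument (-1) presumably selects the inverse rotation direction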
lonnp, latnp = rotate_lonlat1d(lonc, latc, mlon, mlat, -1)
fig, ax = plt.subplots(2)
ax[0].scatter(mlon,mlat)
ax[0].scatter(lonc, latc, marker="*")
#ax.set_xlim([lon.min(),lon.max()])
ax[0].set_xlim([90.0,180.0])
#ax.set_ylim([lat.min(),lat.max()])
ax[0].set_ylim([-30.0,60.0])
ax[0].set_aspect("equal")
ax[0].set_xlabel("longitude")
ax[0].set_ylabel("latitude")
ax[1].scatter(lonnp, latnp, s=10)
ax[1].set_xlim([lon.min(),lon.max()])
ax[1].set_ylim([60.0,lat.max()])
ax[1].set_aspect("equal")
ax[1].set_xlabel("longitude")
ax[1].set_ylabel("latitude")
plt.show()
| [
"[email protected]"
] | |
2599514a359bcc1853bb1c1eb4591332d4afeb29 | ea6fcda29246a86bdd4b3fc19247b4a3e5ea97dd | /Python & Web Applications/2 Basics/Penetest/main.py | a27031b6bb5c330b86a70002910e7068b8f4a945 | [] | no_license | Abbalon/pythons_hacks | d0e4be96dec28bb29c823fd526873edbe58af9cc | 607e904fe7f4d0f4173937b2cb29e3067362fa07 | refs/heads/master | 2020-09-06T04:32:52.446304 | 2020-05-10T17:13:19 | 2020-05-10T17:13:19 | 220,322,905 | 0 | 0 | null | 2020-05-10T17:14:03 | 2019-11-07T20:25:47 | Ruby | UTF-8 | Python | false | false | 151 | py | import requests
response = requests.get('https://web.whatsapp.com/')
if(response.status_code == 200):
print("ok")
else:
print(response.text)
| [
"[email protected]"
] | |
c2a7eb9fb4d719a0c4eca6e4489e155ae0db2dcc | 7875dd778108c1aa51abb13d8e5603c82317b4fd | /src/natcap/__init__.py | d9510401c9b1311aab8bd9c9e3d209edef0c509d | [] | no_license | gaybro8777/rios-deprecated | 358b9117f91b6523109ef5a4bc098972f8be2c1b | 653109a14ad2ee9c01d9f41831aa39668a011916 | refs/heads/master | 2023-03-19T00:55:59.498872 | 2020-06-11T17:51:02 | 2020-06-11T17:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | """this is a namespace package for natcap"""
import pkg_resources
pkg_resources.declare_namespace(__name__)
| [
"[email protected]"
] | |
ad37988f989e0ddb53dce8a9ed42adf25063d185 | bc9151c9437862b45dbbe3e491c2da0986736980 | /2.listas/challenge4_rouillonh.py | fa12aebd0d0fa23d04213b55202a9c23953177af | [
"MIT"
] | permissive | rouillonh/challenge-python | d285eb7ec081e2316bb0177d4448d8bfef653652 | 759adaf12f6112c5482841a81e53e8438d33b33e | refs/heads/main | 2023-09-06T02:14:53.946972 | 2021-11-24T18:23:26 | 2021-11-24T18:23:26 | 432,236,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | print("\tWelcome to the Basketball Roster Program\n")
#Ask the user for the names of the players of their choice
pg = input("Who is your point guard: ").title()
sg = input("Who is your shooting guard: ").title()
sf = input("Who is your small forward: ").title()
pf = input("\nWho is your power forward: ").title()
c = input("Who is your center: ").title()
#Add the players to a new list
roster = []
roster.append(pg)
roster.append(sg)
roster.append(sf)
roster.append(pf)
roster.append(c)
print("\n\tYour starting ",len(roster)," for the upcoming basketball season")
print("\t\tPoint guard: \t\t",pg)
print("\t\tShooting guard: \t",sg)
print("\t\tSmall forward: \t\t",sf)
print("\t\tPower forward: \t\t",pf)
print("\t\tCenter: \t\t",c)
#Remove the injured player and add a replacement via the added_player variable
print("\nOh no, ",sf," is injured.")
roster.remove(sf)
injured_player = sf
print("Your roster only has ",len(roster)," players.")
added_player = input(f"Who will take {injured_player}'s spot: ").title()
roster.insert(2,added_player)
#Finally, display the final team roster
print("\n\tYour starting ",len(roster)," for the upcoming basketball season")
print("\t\tPoint guard: \t\t",pg)
print("\t\tShooting guard: \t",sg)
print("\t\tSmall forward: \t\t",added_player)
print("\t\tPower forward: \t\t",pf)
print("\t\tCenter: \t\t",c)
print("\nGood Luck ",roster[2]," you will do great!")
print("Your roster now has ",len(roster)," players.") | [
"[email protected]"
] | |
e7126106bb6c5d92c93880f93e7b077e87dfe9b9 | a37bbd659c17a29204983ef84dcfeeab30d70d0c | /_controllers/google/adsense.py | 5af84bc6c2247e0515870569e6c11e68766aa40c | [] | no_license | goosemo/blog | 613f2561c48c6d034f4c81ec93806c084fb36521 | ffa9738bf2d8bfa8498b1e9c0fac96b104dadb7e | refs/heads/master | 2021-01-19T20:28:02.554535 | 2014-02-27T07:04:34 | 2014-02-27T07:04:34 | 948,398 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py |
def run():
pass
| [
"[email protected]"
] | |
c389313c6aee0e663d05983f97db3c7d0b8d3209 | 6ee2b41e2f5468f6a1f2113b9505c60e97eb4349 | /Dotation_solidarite_urbaine.py | e54fcc7cf2126645af8a62e3173a4853daab3ca4 | [] | no_license | MikaelMonjour/simulation-dsu | 2b25db753318b14c1528a3bd2ed13aa68b9ac243 | d6054a01e38db87022dcbad46b3980da0eee1c01 | refs/heads/master | 2022-09-01T18:51:45.616649 | 2020-01-25T15:10:14 | 2020-01-25T15:10:14 | 236,148,109 | 0 | 1 | null | 2022-08-23T18:18:33 | 2020-01-25T09:10:50 | Python | UTF-8 | Python | false | false | 7,726 | py | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
choix_commune = input("Tu veux calculer la dotation 2019 pour quelle ville ? : ")
departement = input("Numero du département à deux chiffre ex 67 : ")
print("[+] Calcul pour une commune de plus de 10 000 habitants")
df = pd.read_csv("2019-communes-criteres-repartition.csv", decimal=",")
_nombre_de_villes_elligibles = len(
df["RANG_DSU_SUP_10K"].replace(0, np.nan).dropna()
) * (
2 / 3
) # Les deux tiers de 1032
# Selection des colonnes
df2 = df[
[
"Informations générales - Nom de la commune",
"Informations générales - Population DGF Année N'",
"Informations générales - Population INSEE Année N ",
"Informations générales - Code département de la commune",
"Dotation de solidarité urbaine - Nombre de bénéficiaires des aides au logement de la commune",
"Dotation de solidarité urbaine - Nombre de logements TH de la commune",
"Dotation de solidarité urbaine - Part des bénéficiaires d'aides au logement par rapport au nombre de logements des communes mét de plus de 10000 habitants",
"Dotation de solidarité urbaine - Nombre de logements sociaux de la commune",
"Dotation de solidarité urbaine - Part des logements sociaux dans le total des logements des communes métropolitaines de plus de 10000 habitants",
"Dotation de solidarité urbaine - Revenu imposable moyen par habitant des communes mét de plus de 10 000 habitants",
"Dotation de solidarité urbaine - Revenu imposable par habitant",
"Dotation de solidarité urbaine - Potentiel financier moyen par habitant des communes métropolitaines de plus de 10000 habitants",
"Potentiel fiscal et financier des communes - Potentiel financier par habitant",
"EFFORT_FISCAL", # Changer nom de colonne en production
"RANG_DSU_SUP_10K", # Changer nom de colonne en production
"RANG_DSU_5K_A_10K", # Changer nom de colonne en production
"Dotation de solidarité urbaine - Montant attribution spontanée DSU",
]
]
dfcity = df.loc[
(df["Informations générales - Nom de la commune"] == choix_commune)
& (
df["Informations générales - Code département de la commune"]
== int(departement)
)
]
if len(dfcity) == 0:
print(
f"[!] Il n'existe pas de commune {choix_commune} dans le {departement} - > Arrêt du programme !"
)
else:
population_dgf_commune = dfcity[
"Informations générales - Population DGF Année N'"
].values[0]
population_insee_commune = dfcity[
"Informations générales - Population INSEE Année N "
].values[0]
beneficiaires_aide_au_logement_commune = dfcity[
"Dotation de solidarité urbaine - Nombre de bénéficiaires des aides au logement de la commune"
].values[0]
leffort_fiscal = dfcity["EFFORT_FISCAL"].values[0]
dsu_annee_precedente = dfcity[
"Dotation de solidarité urbaine - Montant attribution spontanée DSU"
].values[0]
print(f"[+] {population_insee_commune}")
if population_insee_commune > 10000:
rang = dfcity["RANG_DSU_SUP_10K"].values[0]
        _pfi_reference_10000 = 1292.66  # Reference financial potential per inhabitant at the national level, communes > 10K inhabitants
pfi_commune = dfcity[
"Potentiel fiscal et financier des communes - Potentiel financier par habitant"
].values[
0
        ]  # Financial potential of the commune whose allocation is being computed
ecart_potentiel_financier_par_hab = _pfi_reference_10000 / pfi_commune
_ri_reference_10000 = (
            15396.50  # Reference taxable income per inhabitant, communes > 10K inhabitants
)
ri_commune = dfcity[
"Dotation de solidarité urbaine - Revenu imposable par habitant"
].values[
0
        ]  # Taxable income per inhabitant of the commune
ecart_revenu_imposable_par_hab = _ri_reference_10000 / ri_commune
_part_des_logement_sociaux_plus_de_10000 = 0.232031
part_des_logement_sociaux_de_la_commune = (
dfcity[
"Dotation de solidarité urbaine - Nombre de logements sociaux de la commune"
].values[0]
/ dfcity[
"Dotation de solidarité urbaine - Nombre de logements TH de la commune"
].values[0]
)
ecart_de_pourcentage_de_logements_sociaux = (
part_des_logement_sociaux_de_la_commune
/ _part_des_logement_sociaux_plus_de_10000
)
_part_des_allocations_logements_plus_de_10000 = 0.515391
part_des_allocations_logements_commune = (
dfcity[
"Dotation de solidarité urbaine - Nombre de bénéficiaires des aides au logement de la commune"
].values[0]
/ dfcity[
"Dotation de solidarité urbaine - Nombre de logements TH de la commune"
].values[0]
)
ecart_de_pourcentage_allocation_logement = (
part_des_allocations_logements_commune
/ _part_des_allocations_logements_plus_de_10000
)
        ponderation_potentiel_financier = 0.30  # May be changed via amendment
ponderation_revenu_imposable = 0.25
ponderation_logement_sociaux = 0.15
ponderation_allocation_logement = 0.30
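        # The synthetic index is the weighted sum of the four criterion ratios
        # (financial potential, taxable income, social housing, housing benefits).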
c1 = ecart_potentiel_financier_par_hab * ponderation_potentiel_financier
c2 = ecart_revenu_imposable_par_hab * ponderation_revenu_imposable
c3 = ecart_de_pourcentage_de_logements_sociaux * ponderation_logement_sociaux
c4 = ecart_de_pourcentage_allocation_logement * ponderation_allocation_logement
indice_synthetique = c1 + c2 + c3 + c4
        rang_de_la_commune = rang  # Rank with respect to the synthetic index (differs for each commune)
numerateur_coeff_rang = (
(3.5 * rang_de_la_commune) + 0.5 - (4 * _nombre_de_villes_elligibles)
)
denominateur_coeff_rang = 1 - _nombre_de_villes_elligibles
coefficient_de_rang = numerateur_coeff_rang / denominateur_coeff_rang
population_insee_de_la_commune = population_insee_commune
population_qpv_de_la_commune = beneficiaires_aide_au_logement_commune
coefficient_qpv = 1 + 2 * (
population_qpv_de_la_commune / population_insee_de_la_commune
)
        # TODO: recover how the point value is actually derived
        _valeur_de_point = (
            0.57362212  # Adjusted according to the criteria above (FOR 2019)
)
        population_dgf = population_dgf_commune  # From the DGCL file
effort_fiscal_de_la_commune = (
leffort_fiscal if leffort_fiscal < 1.3 else 1.3
        )  # From the DGCL file - capped at 1.3
montant_abondement = (
indice_synthetique
* population_dgf
* effort_fiscal_de_la_commune
* coefficient_de_rang
* coefficient_qpv
* _valeur_de_point
)
DSU2019 = dsu_annee_precedente + montant_abondement
print(f"DSU 2019 : {DSU2019}")
df = pd.DataFrame(
{"": [montant_abondement, dsu_annee_precedente]},
index=["Abondement", "DSU N-1"],
)
plot = df.plot.pie(y="", title="TOTAL DSU", figsize=(10, 6))
fig = plot.get_figure()
fig.savefig("figure.png", dpi=300)
elif population_insee_commune > 5000 and population_insee_commune < 10000:
rang = dfcity["RANG_DSU_5K_A_10K"].values[0]
print("Ville de 5K Habitants")
else:
print("Moins de 5 000 habitants")
| [
"[email protected]"
] | |
cfdafc9e200313d08f4693b01ef1b3ed12d4a8fd | 67320a95b746f76a6ecbacb7e323aef0c4b8c4bd | /Diter_Delivery/FS/FS_billing_amount.py | 0b8b403f900c89db97092363f4fabd921474a8cc | [] | no_license | EkaterinaDanilicheva/Project_ivc | 68155be84912fe2af3c4caef088ce92d81a13dba | f7de6431f7fef4220b8ba2198e0974d32af450ae | refs/heads/master | 2021-01-23T16:43:50.739155 | 2017-09-07T14:01:08 | 2017-09-07T14:01:08 | 102,743,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,473 | py | # -*- coding: utf-8
import MySQLdb
import string
import mysql.connector
from mysql.connector import errorcode
import logging
logging.basicConfig(filename='FS_billing_amount.log', format='%(asctime)s %(message)s',level=logging.INFO)
# billing19_002: connect to the database (remember to set the encoding, otherwise garbled characters get written to the database)
config = {
'user': 'tariff',
'password': 'TrubKakuRa',
'host': '81.19.128.73',
'database': 'billing19_002',
'raise_on_warnings': True,
}
try:
billing_db = mysql.connector.connect(**config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
logging.critical("Something is wrong with your user name or password.")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
logging.critical("Database does not exist.")
else:
logging.critical(err)
exit()
logging.info("billing MySQL connected.")
# create a cursor that will be used to execute SQL queries
billing_cursor = billing_db.cursor()
# freeswitch: connect to the database (remember to set the encoding, otherwise garbled characters get written to the database)
config = {
'user': 'portuser',
'password': 'TrubKakuRa',
'host': '81.19.142.2',
'database': 'freeswitch',
'raise_on_warnings': True,
}
try:
freeswitch_db = mysql.connector.connect(**config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
logging.critical("freeswitch_db:Something is wrong with your user name or password.")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
logging.critical("freeswitch_db: Database does not exist.")
else:
logging.critical(err)
exit()
logging.info("freeswitch MySQL connected.")
freeswitch_cursor = freeswitch_db.cursor()
#IS NULL
freeswitch_sql = "SELECT start_stamp, uuid FROM `cdr` WHERE `cdr`.`billing_amount` = 0 AND `cdr`.`billsec`>0 AND `cdr`.`billing_number` LIKE '7__________'"
# исполняем SQL-запрос
freeswitch_cursor.execute(freeswitch_sql)
# получаем результат выполнения запроса
freeswitch_uuid_arr = freeswitch_cursor.fetchall()
# перебираем записи
for freeswitch_uuid in freeswitch_uuid_arr:
# извлекаем данные из записей - в том же порядке, как и в SQL-запросе
start_stamp, uuid = freeswitch_uuid
fs_tel_table = "tel029" + start_stamp.strftime("%Y%m%d")
billing_sql = "SELECT amount FROM `"+ fs_tel_table +"` WHERE timefrom = '"+ str(start_stamp) +"' AND session_id = '"+ uuid +"'"
    # execute the SQL query
billing_cursor.execute(billing_sql)
    # fetch the query result
amount = billing_cursor.fetchall()
billing_amount = amount[0][0]
    # update cdr on freeswitch: fill in the billing_amount
update_sql = "UPDATE `cdr` SET `billing_amount` = '"+ str(billing_amount) +"' WHERE `start_stamp` = '"+ str(start_stamp) +"' AND `uuid` = '"+ uuid +"'"
    # execute the SQL query
freeswitch_cursor.execute(update_sql)
freeswitch_db.commit()
print (update_sql)
# close the database connections
freeswitch_db.close()
billing_db.close()
| [
"[email protected]"
] | |
98308daf2f91ba233841fae32d27a2e2d09d2207 | 04843a11bce1247bdcd55526d5e29732d5d2a2e6 | /mysite/homepage/models.py | a8ff8302526dd5f66fdadd0e25750365f5911aa2 | [] | no_license | mapleyustat/WorkZone | eef19a231a94e18b127e98461756b6ff5a8fe95c | 9d44d07b9d1ae82a3f6a03ef3d707346aae7907c | refs/heads/master | 2020-12-28T09:32:47.463950 | 2014-12-02T09:59:36 | 2014-12-02T09:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | from django.db import models
class ThoughtForTheDay(models.Model):
thought_text = models.CharField(max_length=255)
posted_date = models.DateTimeField('date posted')
class ThoughtOpinion(models.Model):
thought_text = models.ForeignKey(ThoughtForTheDay)
up_votes = models.IntegerField(default=0)
down_votes = models.IntegerField(default=0) | [
"[email protected]"
] | |
ecc759b1d1ddc6f3dbcc08c45f09cc1e521cae4e | 276ab3dc4742f2eab923ab4864923da996e9d994 | /kats/tests/models/test_arima_model.py | 6faba886ab669733da30466d81086dd43b8ee3f6 | [
"MIT"
] | permissive | hyh123a/Kats | 071f2a7c80594720dfbfa300a5c279a3ec18266c | f8b3a9603dcb9014f08e83739343124cd4c32ed5 | refs/heads/main | 2023-07-15T02:08:12.013987 | 2021-09-02T08:24:19 | 2021-09-02T08:24:19 | 402,340,132 | 0 | 0 | MIT | 2021-09-02T08:06:25 | 2021-09-02T08:06:24 | null | UTF-8 | Python | false | false | 3,143 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
import unittest
from unittest import TestCase
import pandas as pd
from kats.consts import TimeSeriesData
from kats.models.arima import ARIMAModel, ARIMAParams
def load_data(file_name):
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
class ARIMAModelTest(TestCase):
def setUp(self):
DATA = load_data("air_passengers.csv")
DATA.columns = ["time", "y"]
self.TSData = TimeSeriesData(DATA)
DATA_daily = load_data("peyton_manning.csv")
DATA_daily.columns = ["time", "y"]
self.TSData_daily = TimeSeriesData(DATA_daily)
DATA_multi = load_data("multivariate_anomaly_simulated_data.csv")
self.TSData_multi = TimeSeriesData(DATA_multi)
def test_fit_forecast(self) -> None:
params = ARIMAParams(p=1, d=1, q=1)
m = ARIMAModel(data=self.TSData, params=params)
m.fit(
start_params=None,
transparams=True,
method="css-mle",
trend="c",
solver="lbfgs",
maxiter=500,
# pyre-fixme[6]: Expected `bool` for 7th param but got `int`.
full_output=1,
disp=False,
callback=None,
start_ar_lags=None,
)
m.predict(steps=30)
m.plot()
m_daily = ARIMAModel(data=self.TSData_daily, params=params)
m_daily.fit()
m_daily.predict(steps=30, include_history=True)
m.plot()
def test_others(self) -> None:
params = ARIMAParams(p=1, d=1, q=1)
params.validate_params()
m = ARIMAModel(data=self.TSData, params=params)
# test __str__ method
self.assertEqual(m.__str__(), "ARIMA")
# test input error
self.assertRaises(
ValueError,
ARIMAModel,
self.TSData_multi,
params,
)
# test search space
self.assertEqual(
m.get_parameter_search_space(),
[
{
"name": "p",
"type": "choice",
"values": list(range(1, 6)),
"value_type": "int",
"is_ordered": True,
},
{
"name": "d",
"type": "choice",
"values": list(range(1, 3)),
"value_type": "int",
"is_ordered": True,
},
{
"name": "q",
"type": "choice",
"values": list(range(1, 6)),
"value_type": "int",
"is_ordered": True,
},
],
)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
251259e6477fbef29aae030289f231724f2d6123 | a608fc3bdef273edc87eb707e294341323c8152e | /config.py | 7d61124e0c10730dd251e84480ad78a91af6a945 | [] | no_license | Hualin/prams | 1faba53b204b3ccee8c8cc03115d2f5139b88f91 | 467483fc070acf5cfe971233be1731e024808cdf | refs/heads/master | 2020-05-19T17:30:03.925334 | 2013-05-16T00:57:51 | 2013-05-16T00:57:51 | 10,082,749 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | # configuration
DEBUG = True
SECRET_KEY = 'development key'
SQLALCHEMY_DATABASE_URI = 'sqlite:///prams.db' | [
"[email protected]"
] | |
4e11f1bdd89a47a0d95835f7605481daba0f366e | 924224d9b9b86092f63eb54476f3886adc008a90 | /helper.py | 7bc5b05b5b7760a0c1cd7bccdecf19e70b57a6ab | [] | no_license | pfespada/Madrid-AirBnB-Analysis | 8e12417aff86cca41d580eb18e0c4e0f84f85ef2 | 2de0903c929dc9935deed8a98055ad82809f19bc | refs/heads/master | 2020-05-01T16:16:12.640504 | 2019-03-26T11:17:32 | 2019-03-26T11:17:32 | 177,568,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,137 | py | # Investigate the variance accounted for by each principal component.
#function to plot the principal components as well as the cumulative variance
def scree_plot(pca):
'''
Creates a scree plot associated with the principal components
    INPUT: pca - a fitted instance of scikit-learn's PCA
OUTPUT:
None
'''
num_components=len(pca.explained_variance_ratio_)
ind = np.arange(num_components)
vals = pca.explained_variance_ratio_
plt.figure(figsize=(20, 15))
ax = plt.subplot(111)
cumvals = np.cumsum(vals)
ax.bar(ind, vals)
ax.plot(ind, cumvals)
count=0
for i in range(num_components):
count+=1
ax.annotate(r"%s%%" % ((str(vals[i]*100)[:4])), (ind[i]+0.2, vals[i]), va="bottom", ha="center", fontsize=12)
if count==3:
break
ax.xaxis.set_tick_params(width=0)
ax.yaxis.set_tick_params(width=2, length=12)
ax.set_xlabel("Principal Component")
ax.set_ylabel("Variance Explained (%)")
plt.title('Explained Variance Per Principal Component')
# function to fill the NaN values using the mean
def impute_missing(df, col):
"""
    Fill the missing (NaN) values of the given column with that column's mean.
    Args: Data frame and column name
    Returns: None - fillna(..., inplace=True) modifies the column in place
"""
return df[col].fillna(df[col].mean(),inplace=True)
#funtion to get R2 score in train and test data
def quick_val (model):
'''
    Computes and prints the R^2 score of a fitted model on the train and test data
INPUT: model to be validated
OUTPUT:
R2 result for train and test data
'''
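    # note: relies on X_train, X_test, y_train and y_test being defined at module scope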
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)
train_score = r2_score(y_train, train_predict)
test_score = r2_score(y_test, test_predict)
return print("In the model {}, The rsquared on the training data was {} and the rsquared on the test data was {}.".format(type(model).__name__,train_score, test_score))
| [
"[email protected]"
] | |
e769b11263a1c2fea3831cc76399e87d0caea22a | 8c6b28c62e13f1007ee908d5f283bed3fca54d66 | /app/routes/register_equipment.py | f29bad92bbc08a71069f465366e63dcccdbc9fa7 | [] | no_license | jvsn19/modec-api | 2f6ba0ef03ed2d42baa200a6e92494f7c7ca20d4 | a869a7a69515e3b00764bb192c8587ad8917c5d7 | refs/heads/main | 2023-01-04T08:15:17.166021 | 2020-11-05T22:06:44 | 2020-11-05T22:06:44 | 310,398,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | from flask import request
from . import routes
from ..db import CustomDatabase
from ..utils import VesselDoesNotExistException, EquipmentAlreadyCreatedException
@routes.route('/register-equipment', methods=['POST'])
def register_equipment():
response_json = request.get_json()
params = {
'vessel_id': response_json['vessel-id'],
'equipment_id': response_json['equipment-id'],
'name': response_json['name'],
'location': response_json['location'],
}
try:
CustomDatabase.add_equipment(**params)
return "Created", 201
except EquipmentAlreadyCreatedException as ex:
return str(ex), 409
except VesselDoesNotExistException as ex:
return str(ex), 404
except Exception as _:
return "Bad Request", 400 | [
"[email protected]"
] | |
264b6c57af456663534b23a73353a20cb421322a | 416814a6f129400d0802ef334ac7cea6bd3b69e4 | /educa/courses/fields.py | 5e0a6bbda9fd91738ca0e349d707e91387d65483 | [] | no_license | shahparan9988/CSE327 | d6c875df3be3c9ca03f25166e7a88c9c43ea7216 | f2df19f6213f2ed43360bdc617b8b8c37936dbb9 | refs/heads/master | 2022-02-12T23:48:26.215266 | 2019-07-21T12:56:12 | 2019-07-21T12:56:12 | 198,091,394 | 0 | 0 | null | 2019-07-21T18:29:18 | 2019-07-21T18:29:18 | null | UTF-8 | Python | false | false | 1,493 | py | from django.db import models
from django.core.exceptions import ObjectDoesNotExist
# Using PositiveIntegerField we can easily specify the order of objects
# custom order Field -> inherits PositiveIntegerField
class OrderField(models.PositiveIntegerField):
def __init__(self, for_fields=None, *args, **kwargs):
self.for_fields = for_fields #indicates the field that the order
#has to be calculated with respect to
super(OrderField, self).__init__(*args, **kwargs)
# executes before saving the field in database
def pre_save(self, model_instance, add):
if getattr(model_instance, self.attname) is None:
#no current value
try:
qs = self.models.objects.all()
if self.for_fields:
# filter by objects with the same field values
# for the fields in "for_fields"
query = {field: getattr(model_instance, field)\
for field in self.for_fields}
qs = qs.filter(**query)
# get the order of the last item
last_item = qs.latest(self.attname)
value = last_item.order + 1
except ObjectDoesNotExist:
value = 0
setattr(model_instance, self.attname, value)
return value
else:
return super(OrderField,
self).pre_save(model_instance, add)
| [
"[email protected]"
] | |
ecc5fcce79e4d79a47ed00b608037a48442a8845 | e134c1a98cb9cceaa188fc019ca1a955bf1046b6 | /OzoneLUNA/ozone_response_pcuo.py | b21ffd76884a36c9d3ed723f26103395144660fe | [] | no_license | ziu1986/python_scripts | bd36bfd58e136657432331191c9e08fa85b6d8e1 | df5d237b68e0143f35c5ff1c58190460692fdac8 | refs/heads/master | 2022-05-23T19:24:54.643726 | 2022-04-08T12:17:49 | 2022-04-08T12:17:49 | 170,145,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,826 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sample_from_norm import compute_cuo
from mytools.met_tools import print_all
def cuo(o3_mu, o3_sigma, gs_o3, gs_o3_sigma, o3_fumi, o3_days, **kwarg):
article = kwarg.pop("article", "general")
if (article=="watanabe14"):
exp = kwarg.pop("exp", 'CC')
pcuo = []
pcuo_std = []
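        # accumulate CUO period by period: each period's mean adds to the running
        # total while the standard deviations are combined in quadrature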
cuo_mean, cuo_std = compute_cuo(o3_mu['%s_1' % exp], o3_sigma['%s_1' % exp], gs_o3['%s_1' % exp], gs_o3_sigma['%s_1' % exp], int(o3_fumi['%s_1' % exp]), o3_days['%s_1' % exp])
pcuo.append(cuo_mean)
pcuo_std.append(cuo_std)
cuo_mean, cuo_std = compute_cuo(o3_mu['%s_1.5' % exp], o3_sigma['%s_1.5' % exp], (gs_o3['%s_1' % exp]+gs_o3['%s_2' % exp])*0.5, np.sqrt(gs_o3_sigma['%s_1' % exp]**2+gs_o3_sigma['%s_2' % exp]**2)*0.5, int(o3_fumi['%s_1.5' % exp]), o3_days['%s_1.5' % exp]-o3_days['%s_1' % exp])
cuo_mean = cuo_mean+pcuo[-1]
cuo_std = np.sqrt(cuo_std**2+pcuo_std[-1]**2)
pcuo.append(cuo_mean)
pcuo_std.append(cuo_std)
cuo_mean, cuo_std = compute_cuo(o3_mu['%s_2' % exp], o3_sigma['%s_2' % exp], gs_o3['%s_2' % exp], gs_o3_sigma['%s_2' % exp], int(o3_fumi['%s_2' % exp]), (o3_days['%s_2' % exp]-o3_days['%s_1.5' % exp]))
cuo_mean = cuo_mean+pcuo[-1]
cuo_std = np.sqrt(cuo_std**2+pcuo_std[-1]**2)
pcuo.append(cuo_mean)
pcuo_std.append(cuo_std)
cuo_mean, cuo_std = compute_cuo(o3_mu['%s_3' % exp], o3_sigma['%s_3' % exp], gs_o3['%s_3' % exp], gs_o3_sigma['%s_3' % exp], int(o3_fumi['%s_3' % exp]), (o3_days['%s_3' % exp]-o3_days['%s_2' % exp]))
cuo_mean = cuo_mean+pcuo[-1]
cuo_std = np.sqrt(cuo_std**2+pcuo_std[-1]**2)
pcuo.append(cuo_mean)
pcuo_std.append(cuo_std)
cuo_mean, cuo_std = compute_cuo(o3_mu['%s_4' % exp], o3_sigma['%s_4' % exp], gs_o3['%s_4' % exp], gs_o3_sigma['%s_4' % exp], int(o3_fumi['%s_4' % exp]), (o3_days['%s_4' % exp]-o3_days['%s_3' % exp]))
cuo_mean = cuo_mean+pcuo[-1]
cuo_std = np.sqrt(cuo_std**2+pcuo_std[-1]**2)
pcuo.append(cuo_mean)
pcuo_std.append(cuo_std)
pcuo = np.array(pcuo)
pcuo_std = np.array(pcuo_std)
return(pcuo,pcuo_std)
# Compute accumulated ozone for each article
xu_pcuo = []
xu_pcuo_std = []
for i in range(4):
cuo_mean, cuo_std = compute_cuo(xu_o3_mu[i], xu_o3_sigma[i], xu_gs_o3[i], xu_gs_o3_sigma[i], 10, xu_o3_days[i])
xu_pcuo.append(cuo_mean)
xu_pcuo_std.append(cuo_std)
pelle_pcuo = []
pelle_pcuo_std = []
for i in range(1,6):
cuo_mean, cuo_std = compute_cuo(pelle_o3_mu[i], pelle_o3_sigma[i], (pelle_gs_o3[i]-pelle_gs_o3[i-1])*0.5+pelle_gs_o3[i-1], 0.5*np.sqrt(pelle_gs_o3_sigma[i]**2+pelle_gs_o3_sigma[i-1]**2), 5, pelle_o3_days[i]-pelle_o3_days[i-1])
if i>1:
cuo_mean = cuo_mean+pelle_pcuo[-1]
cuo_std = np.sqrt(cuo_std**2+pelle_pcuo_std[-1]**2)
pelle_pcuo.append(cuo_mean)
pelle_pcuo_std.append(cuo_std)
watanabe_pcuo_cf, watanabe_pcuo_std_cf = cuo(watanabe_o3_mu, watanabe_o3_sigma, watanabe_gs_o3, watanabe_gs_o3_sigma, watanabe_o3_fumi, watanabe_o3_days, exp='CC', article='watanabe14')
watanabe_pcuo, watanabe_pcuo_std = cuo(watanabe_o3_mu, watanabe_o3_sigma, watanabe_gs_o3, watanabe_gs_o3_sigma, watanabe_o3_fumi, watanabe_o3_days, exp='OO', article='watanabe14')
watanabe_pcuo_oc, watanabe_pcuo_std_oc = cuo(watanabe_o3_mu, watanabe_o3_sigma, watanabe_gs_o3, watanabe_gs_o3_sigma, watanabe_o3_fumi, watanabe_o3_days, exp='OC', article='watanabe14')
watanabe_pcuo_co, watanabe_pcuo_std_co = cuo(watanabe_o3_mu, watanabe_o3_sigma, watanabe_gs_o3, watanabe_gs_o3_sigma, watanabe_o3_fumi, watanabe_o3_days, exp='CO', article='watanabe14')
pelle14_pcuo = []
pelle14_pcuo_std = []
pelle14_pcuo1 = []
pelle14_pcuo1_std = []
pelle14_pcuo2 = []
pelle14_pcuo2_std = []
for i in range(1,6):
cuo_mean, cuo_std = compute_cuo(pelle14_o3_mu[i], pelle14_o3_sigma[i], (pelle14_gs_o3[i]-pelle14_gs_o3[i-1])*0.5+pelle14_gs_o3[i-1], 0.5*np.sqrt(pelle14_gs_o3_sigma[i]**2+pelle14_gs_o3_sigma[i-1]**2), 5, pelle14_o3_days[i]-pelle14_o3_days[i-1])
cuo1_mean, cuo1_std = compute_cuo(pelle14_o3_mu[i], pelle14_o3_sigma[i], pelle14_gs_o3[i], pelle14_gs_o3_sigma[i], 5, pelle14_o3_days[i]-pelle14_o3_days[i-1])
cuo2_mean, cuo2_std = compute_cuo(pelle14_o3_mu[i], pelle14_o3_sigma[i], pelle14_gs_o3[i-1], pelle14_gs_o3_sigma[i-1], 5, pelle14_o3_days[i]-pelle14_o3_days[i-1])
if i>1:
cuo_mean = cuo_mean+pelle14_pcuo[-1]
cuo_std = np.sqrt(cuo_std**2+pelle14_pcuo_std[-1]**2)
cuo1_mean = cuo1_mean+pelle14_pcuo1[-1]
cuo1_std = np.sqrt(cuo1_std**2+pelle14_pcuo1_std[-1]**2)
cuo2_mean = cuo2_mean+pelle14_pcuo2[-1]
cuo2_std = np.sqrt(cuo2_std**2+pelle14_pcuo2_std[-1]**2)
pelle14_pcuo.append(cuo_mean)
pelle14_pcuo_std.append(cuo_std)
pelle14_pcuo1.append(cuo1_mean)
pelle14_pcuo1_std.append(cuo1_std)
pelle14_pcuo2.append(cuo2_mean)
pelle14_pcuo2_std.append(cuo2_std)
kinose_pcuo = []
kinose_pcuo_std = []
for j in range(3):
o3_mu = kinose_o3_mu[j::3]
o3_sigma = kinose_o3_sigma[j::3]
gs_o3 = kinose_gs_o3[j::3].interpolate()
gs_o3_sigma = kinose_gs_o3_sigma[j::3].interpolate()
o3_days = kinose_o3_days[j::3]
for i in range(1,kinose_o3_mu[0::3].size):
cuo_mean, cuo_std = compute_cuo(o3_mu[i], o3_sigma[i], (gs_o3[i]-gs_o3[i-1])*0.5+gs_o3[i-1], 0.5*np.sqrt(gs_o3_sigma[i]**2+gs_o3_sigma[i-1]**2), 12, o3_days[i]-o3_days[i-1])
if i>1:
cuo_mean = cuo_mean+kinose_pcuo[-1]
cuo_std = np.sqrt(cuo_std**2+kinose_pcuo_std[-1]**2)
kinose_pcuo.append(cuo_mean)
kinose_pcuo_std.append(cuo_std)
kinose_pcuo = np.array(kinose_pcuo).reshape(3, len(kinose_pcuo) // 3)
kinose_pcuo_std = np.array(kinose_pcuo_std).reshape(3, len(kinose_pcuo_std) // 3)
watanabe13_pcuo = []
watanabe13_pcuo_std = []
for i in (0,2):
cuo_mean, cuo_std = compute_cuo(watanabe13_o3_mu[i], watanabe13_o3_sigma[i], watanabe13_gs_o3[i], watanabe13_gs_o3_sigma[i], int(watanabe13_o3_fumi[i]), watanabe13_o3_days[i])
watanabe13_pcuo.append(cuo_mean)
watanabe13_pcuo_std.append(cuo_std)
cuo_mean_1, cuo_std_1 = compute_cuo(watanabe13_o3_mu[i], watanabe13_o3_sigma[i], watanabe13_gs_o3[i], watanabe13_gs_o3_sigma[i], int(watanabe13_o3_fumi[i]), watanabe13_o3_days[i]-watanabe13_o3_days[i+1])
cuo_mean_max, cuo_std_max = compute_cuo(watanabe13_o3_mu[i+1], watanabe13_o3_sigma[i+1], watanabe13_gs_o3[i], watanabe13_gs_o3_sigma[i], int(watanabe13_o3_fumi[i+1]), watanabe13_o3_days[i+1])
cuo_mean_min, cuo_std_min = compute_cuo(watanabe13_o3_mu[i+1], watanabe13_o3_sigma[i+1], watanabe13_gs_o3[i+1], watanabe13_gs_o3_sigma[i+1], int(watanabe13_o3_fumi[i+1]), watanabe13_o3_days[i+1])
cuo_mean_mean, cuo_std_mean = compute_cuo(watanabe13_o3_mu[i+1], watanabe13_o3_sigma[i+1], (watanabe13_gs_o3[i]+watanabe13_gs_o3[i+1])*0.5, np.sqrt(watanabe13_gs_o3_sigma[i+1]**2+watanabe13_gs_o3_sigma[i+1]**2)*0.5, int(watanabe13_o3_fumi[i+1]), watanabe13_o3_days[i+1])
cuo_mean = cuo_mean_1+cuo_mean_mean
# Max uncertainty estimation (variation of start- and endpoint gsto)
cuo_std = (cuo_mean_mean-cuo_mean_min, cuo_mean_max-cuo_mean_mean)
watanabe13_pcuo.append(cuo_mean)
watanabe13_pcuo_std.append(cuo_std)
#print(watanabe13_o3_days[i], cuo_mean, cuo_std)
#print(watanabe13_o3_days[i]-watanabe13_o3_days[i+1], cuo_mean_1, cuo_std_1)
#print(watanabe13_o3_days[i+1], cuo_mean_max, cuo_std_max)
#print(watanabe13_o3_days[i+1], cuo_mean_min, cuo_std_min)
#print(watanabe13_o3_days[i+1], cuo_mean_mean, cuo_std_mean)
gao_pcuo = []
gao_pcuo_std = []
for j in range(2):
for i in range(4):
#print(i+j*4)
if j<1:
leaf_age = gao_o3_days[0::2][i+j*4]
else:
leaf_age = gao_o3_days[0::2][i+j*4]-gao_o3_days[0::2][i]
# Select the non filtered data for both measurement dates and cycle them
cuo_mean, cuo_std = compute_cuo(gao_o3_mu[0::2][i+j*4], gao_o3_sigma[0::2][i+j*4], gao_gs_o3[0::2][i+j*4], gao_gs_o3_sigma[0::2][i+j*4], int(gao_o3_fumi[0::2][i+j*4]), leaf_age)
# Push them to accumulated ozone
gao_pcuo.append(cuo_mean)
gao_pcuo_std.append(cuo_std)
#print("nf", j,i, cuo_mean, cuo_std)
if j==0:
leaf_age = gao_o3_days[1::2][i+j*4]
else:
leaf_age = gao_o3_days[1::2][i+j*4]-gao_o3_days[1::2][i]
# Select the ozone treated data
cuo_mean, cuo_std = compute_cuo(gao_o3_mu[1::2][i+j*4], gao_o3_sigma[1::2][i+j*4], gao_gs_o3[1::2][i+j*4], gao_gs_o3_sigma[1::2][i+j*4], int(gao_o3_fumi[1::2][i+j*4]), leaf_age)
# Push them to accumulated ozone
gao_pcuo.append(cuo_mean)
gao_pcuo_std.append(cuo_std)
#print("o3",j,i, cuo_mean, cuo_std)
# Add the accumulation of ozone under ambient conditions (rest of the day)
#cuo_mean, cuo_std = compute_cuo(gao_o3_mu[0::2][i+j*4], gao_o3_sigma[0::2][i+j*4], gao_gs_o3[1::2][i+j*4], gao_gs_o3_sigma[1::2][i+j*4], int(gao_o3_fumi[0::2][i+j*4]-gao_o3_fumi[1::2][i+j*4]), leaf_age)
#print("o3",j,i, cuo_mean, cuo_std)
#gao_pcuo[-1] = gao_pcuo[-1] + cuo_mean
#gao_pcuo_std[-1] = np.sqrt(gao_pcuo_std[-1]**2 + cuo_std**2)
#print("o3",j,i, gao_pcuo[-1], gao_pcuo_std[-1])
##print(cuo_mean, cuo_std)
gao_pcuo = np.array(gao_pcuo)
gao_pcuo_std = np.array(gao_pcuo_std)
gao_pcuo[8:] = gao_pcuo[:8]+gao_pcuo[8:]
gao_pcuo_std[8:] = np.sqrt(gao_pcuo_std[:8]**2+gao_pcuo_std[8:]**2)
harmens_pcuo = []
harmens_pcuo_std = []
for j in range(3):
for i in range(4):
if j==0:
cuo_mean, cuo_std = compute_cuo(harmens_o3_mu[4*j:4*j+4][i], harmens_o3_sigma[4*j:4*j+4][i], harmens_gs_o3[4*j:4*j+4][i],harmens_gs_o3_sigma[4*j:4*j+4][i], harmens_o3_fumi[4*j:4*j+4][i].astype(int), harmens_o3_days[4*j:4*j+4][i])
if j>0:
cuo_mean, cuo_std = compute_cuo(harmens_o3_mu[4*j:4*j+4][i], harmens_o3_sigma[4*j:4*j+4][i], (harmens_gs_o3[4*j:4*j+4][i]-harmens_gs_o3[4*(j-1):4*(j-1)+4][i])*0.5+harmens_gs_o3[4*(j-1):4*(j-1)+4][i], 0.5*np.sqrt(harmens_gs_o3_sigma[4*j:4*j+4][i]**2+harmens_gs_o3_sigma[4*(j-1):4*(j-1)+4][i]**2), harmens_o3_fumi[4*j:4*j+4][i].astype(int), harmens_o3_days[4*j:4*j+4][i]-harmens_o3_days[4*(j-1):4*(j-1)+4][i])
cuo_mean = cuo_mean+harmens_pcuo[4*(j-1):4*(j-1)+4][i]
cuo_std = np.sqrt(cuo_std**2+harmens_pcuo_std[4*(j-1):4*(j-1)+4][i]**2)
harmens_pcuo.append(cuo_mean)
harmens_pcuo_std.append(cuo_std)
harmens_pcuo = np.array(harmens_pcuo)
harmens_pcuo_std = np.array(harmens_pcuo_std)
| [
"[email protected]"
] | |
36360f49cabe5fb47ab93822cade686107ed8662 | 626b230317ce6d53f742319ff194c19d6afc8e17 | /30_daylearn_spider/4-30.py | 8165b89231b51bb5960afb461259d8292e24a466 | [] | no_license | myyyy/Spider | d6fac6b4b5973834f01ba82b14980b6179d66d6a | d7a7ae92778c9837caad4020106e4b54a2922ec9 | refs/heads/master | 2021-01-09T20:52:10.313968 | 2017-06-09T10:34:01 | 2017-06-09T10:34:01 | 57,097,806 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # -*- coding: utf-8 -*-
# URL异常处理
import urllib2
def SpiderDemo():
url = "http://qzone.qq.com/"
user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36"
value = {'u':'name','p':'password'}
header = {'User-Agent':user_agent}
data = urllib2.urlencode(value)
request = urllib2.Request(url,data,header)
try:
response = urllib2.urlopen(request)
page = response.read()
return page
except urllib2.URLError,e:
return e.reason
if __name__=="__main__":
print SpiderDemo() | [
"[email protected]"
] | |
4f4711027be00272d70319e9a88009d4d0e8bcc8 | 3c2fb2221aa68af9f909cea2b15ae883dc5f0bee | /hw5/test.py | e139fa2f5118ddc651f7f729019488f9f7378dfe | [] | no_license | RAYHOU777/ML2017 | 26c70104a4c8c53d43a56c0399a1988764cd5191 | d6bae50984350823e63f6e0692205e3801a0f3b9 | refs/heads/master | 2021-03-19T16:35:54.539917 | 2017-06-08T14:53:17 | 2017-06-08T14:53:17 | 82,925,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | py |
import keras.backend as K
import pickle
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import sys
test_path = sys.argv[1]
output_path = sys.argv[2]
def f1_score(y_true,y_pred):
thresh = 0.5
y_pred = K.cast(K.greater(y_pred,thresh),dtype='float32')
tp = K.sum(y_true * y_pred)
precision=tp/(K.sum(y_pred))
recall=tp/(K.sum(y_true))
return 2*((precision*recall)/(precision+recall))
def read_data(path,training):
print ('Reading data from ',path)
with open(path,'r') as f:
tags = []
articles = []
tags_list = []
f.readline()
for line in f:
if training :
start = line.find('\"')
end = line.find('\"',start+1)
tag = line[start+1:end].split(' ')
article = line[end+2:]
for t in tag :
if t not in tags_list:
tags_list.append(t)
tags.append(tag)
else:
start = line.find(',')
article = line[start+1:]
articles.append(article)
if training :
assert len(tags_list) == 38,(len(tags_list))
assert len(tags) == len(articles)
return (tags,articles,tags_list)
#(Y_data,X_data,tag_list) = read_data('train_data.csv',True)
(_, X_test,_) = read_data(test_path,False)
#all_corpus = X_data + X_test
model2 = load_model('best.hdf5', custom_objects={'f1_score': f1_score})
#tokenizer = Tokenizer()
#tokenizer.fit_on_texts(all_corpus)
tag_list = ['SCIENCE-FICTION', 'SPECULATIVE-FICTION', 'FICTION', 'NOVEL', 'FANTASY', "CHILDREN'S-LITERATURE", 'HUMOUR', 'SATIRE', 'HISTORICAL-FICTION', 'HISTORY', 'MYSTERY', 'SUSPENSE', 'ADVENTURE-NOVEL', 'SPY-FICTION', 'AUTOBIOGRAPHY', 'HORROR', 'THRILLER', 'ROMANCE-NOVEL', 'COMEDY', 'NOVELLA', 'WAR-NOVEL', 'DYSTOPIA', 'COMIC-NOVEL', 'DETECTIVE-FICTION', 'HISTORICAL-NOVEL', 'BIOGRAPHY', 'MEMOIR', 'NON-FICTION', 'CRIME-FICTION', 'AUTOBIOGRAPHICAL-NOVEL', 'ALTERNATE-HISTORY', 'TECHNO-THRILLER', 'UTOPIAN-AND-DYSTOPIAN-FICTION', 'YOUNG-ADULT-LITERATURE', 'SHORT-STORY', 'GOTHIC-FICTION', 'APOCALYPTIC-AND-POST-APOCALYPTIC-FICTION', 'HIGH-FANTASY']
#with open("tokenizer.txt", "wb") as f:
# pickle.dump(tokenizer, f, pickle.HIGHEST_PROTOCOL)
tokenizer = pickle.load(open("tokenizer.txt", "rb"))  # binary mode: the tokenizer was pickled with the binary protocol
word_index = tokenizer.word_index
max_article_length =None
test_sequences = tokenizer.texts_to_sequences(X_test)
test_sequences = pad_sequences(test_sequences,maxlen=max_article_length)
Y_pred = model2.predict(test_sequences)
thresh = 0.65
with open(output_path,'w') as output:
# print ('\"id\",\"tags\"',file=output)
Y_pred_thresh = (Y_pred > thresh).astype('int')
output.write('"id","tags"\n')
for index,labels in enumerate(Y_pred_thresh):
labels = [tag_list[i] for i,value in enumerate(labels) if value==1 ]
labels_original = ' '.join(labels)
# print ('\"%d\",\"%s\"'%(index,labels_original),file=output)
output.write('"' + str(index) + '"' + ',' + '"' + labels_original + '"' + '\n')
| [
"[email protected]"
] | |
99765317a04672e9d0a7177524447f4636b4014f | 5de9e0cba3693e80c9aa10f76a513e39ac413239 | /mercantile/contrib/teamspeak.py | 9f6cd53c4ce8e4539a36cb938039ddf16d689fe7 | [] | no_license | DeadWisdom/mercantile | 4ca77ef80f295ef8cf38fb17ae0cd4c52e973bfe | f31ccb38043e54f15ade48d9cef9018a0b8ec8d4 | refs/heads/master | 2021-01-22T06:48:50.588271 | 2015-02-19T20:36:01 | 2015-02-19T20:36:01 | 18,348,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | from fabric.decorators import task
from fabric.api import env, sudo, cd, settings, local
@task
def build():
try:
import pyquery
except ImportError:
print "PyQuery not found."
return -1
print env.user
## Add User
with settings(warn_only=True):
sudo("useradd -m -U %s -c \"%s\"" % ("ts", "Teamspeak 3"))
with cd("/home/%s/" % "ts"):
# Change the Shell to Bash
sudo("chsh -s /bin/bash %s" % "ts")
## Get Latest Url
url = get_latest_ts3_url()
filename = url.rsplit('/', 1)[-1]
print "TEAMSPEAK:", url, filename
print "Installing..."
## Install
with cd("/home/ts"):
sudo("wget %s" % url, user="ts")
sudo("tar xzf %s" % filename, user="ts")
## Restart
with cd("/home/ts/teamspeak3-server_linux-amd64"):
with settings(warn_only=True):
sudo("./ts3server_startscript.sh stop", user="ts")
sudo("./ts3server_startscript.sh start", user="ts")
### Helpers ###
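# Scrape the TeamSpeak release index, pick the newest 3.x version that actually
# has a downloadable amd64 server tarball, and return its full URL.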
def get_latest_ts3_url(root="http://teamspeak.gameserver.gamed.de/ts3/releases"):
from pyquery import PyQuery as pq
doc = pq(url=root)
versions = []
for e in doc("td.n a"):
if e.text.startswith('3'):
try:
tup = e.text.split('.')
versions.append( tuple(int(x) for x in tup) )
except:
continue
versions.sort(reverse=True)
for version in versions:
version = ".".join(str(x) for x in version)
print "%s/%s/" % (root, version)
try:
doc = pq(url="%s/%s/" % (root, version))
except:
continue
target = "teamspeak3-server_linux-amd64-%s.tar.gz" % version
#target = "teamspeak3-server_linux-x86-%s.tar.gz" % version
for e in doc("td.n a"):
print e.text.strip(), target
if e.text.strip() == target:
return "%s/%s/%s" % (root, version, target)
return None | [
"[email protected]"
] | |
e74775b68c3364a87ea2e36ba0593d5ca14a41c7 | 7c9e6d7a596c4b948346e56e8a7b0e0428caa307 | /homework/2021-01-31/int_to_binary.py | 8c7721a203422c228ed28dfced3394cb5b3b09c4 | [] | no_license | WebOrGameMaker/LearnPython | b96ea7653d58c5a1a5341ed818b90a729d29a93d | 0b1aeefd452242f58808acfb6bbda0649c32ff39 | refs/heads/master | 2023-06-07T13:09:19.879813 | 2021-07-04T17:19:09 | 2021-07-04T17:19:09 | 293,579,954 | 0 | 0 | null | 2021-07-04T17:19:10 | 2020-09-07T16:35:26 | Python | UTF-8 | Python | false | false | 460 | py | def binary_str(num):
"""
>>> binary_str(23937495825)
'10110010010110010010011001100010001'
>>> binary_str(876765)
'11010110000011011101'
>>> binary_str(213)
'11010101'
"""
digits = []
while num > 0:
current_digit = num % 2
digits.append(str(current_digit))
num //= 2
digits.reverse()
return "".join(digits)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| [
"[email protected]"
] | |
5a59e7fb8e998d23f9ac3789fc3c5eec2a7ad2f6 | 8bd77ab469c1c95e39db31863172fdf238968dac | /ci/cloudbuild/scheduled/reap-gke-clusters/cleanup_load_balancers.py | 8a6ab06724bc33bcfb809ea77875346b728d4183 | [
"Apache-2.0"
] | permissive | google/kf | 4952b9389cc1381a47f9099fc6021f62304a35bf | 63b13dbe4e41855cf243605e9190229c4fa56da8 | refs/heads/main | 2023-09-03T19:47:13.966967 | 2023-08-29T19:01:36 | 2023-08-29T19:01:36 | 184,312,842 | 404 | 75 | Apache-2.0 | 2023-09-12T19:01:49 | 2019-04-30T18:34:38 | Go | UTF-8 | Python | false | false | 6,468 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import json
import sys
import http.client
from urllib.parse import urlparse
if len(sys.argv) != 2:
print("Usage: %s [PROJECT_ID]" % sys.argv[0])
sys.exit(1)
project_id = sys.argv[1]
def execute(command):
call = subprocess.run(command.split(), stdout=subprocess.PIPE, check=True)
return call.stdout.decode("utf-8")
class TargetPool:
def __init__(self, name, region, health_checks):
self.name = name
self.region = region
self.health_checks = health_checks
def __hash__(self):
return hash("%s/%s" % (self.name, self.region))
def __eq__(self, other):
return (self.name, self.region) == (other.name, other.region)
def extract_region(regionURL):
# regionURL paths look like the following:
# /compute/v1/projects/<PROJECT_ID>/regions/<REGION/6>
url = urlparse(regionURL)
splits = url.path.split("/")
return splits[6]
def target_pools(project_id):
target_pool_list = json.loads(execute("gcloud --project %s compute target-pools list --format='json'" % project_id))
for target_pool in target_pool_list:
health_checks = []
if "healthChecks" in target_pool:
health_checks = target_pool["healthChecks"]
yield TargetPool(target_pool["name"], extract_region(target_pool["region"]), health_checks)
def instances(project_id, target_pool):
target_pool_desc = json.loads(execute("gcloud --project %s compute target-pools describe --region %s --format='json' %s" % (project_id, target_pool.region, target_pool.name)))
if "instances" in target_pool_desc:
for instanceURL in target_pool_desc["instances"]:
yield instanceURL
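# valid_instance checks whether an instance URL still resolves in the Compute
# API (HTTP 200), authenticating with the caller's gcloud access token.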
def valid_instance(instanceURL):
url = urlparse(instanceURL)
conn = http.client.HTTPSConnection(url.netloc)
auth_token = "Bearer " + execute("gcloud --project %s auth print-access-token" % project_id).strip()
headers = {"Authorization": auth_token}
conn.request("GET", url.path, headers=headers)
return conn.getresponse().getcode() == 200
def valid_target_pool(project_id, target_pool):
for instanceURL in instances(project_id, target_pool):
if valid_instance(instanceURL):
# Found a valid instance, we know the target pool is valid
return True
# We didn't find a valid instance, must be invalid
return False
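# map_forwarding_rules builds a lookup from TargetPool to forwarding-rule name
# by parsing the target URL of every forwarding rule in the project.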
def map_forwarding_rules(project_id):
forwarding_rules = json.loads(execute("gcloud --project %s compute forwarding-rules list --format='json'" % project_id))
result = {}
# The target pool is under the 'target' field. However it is listed as a
# URL. The path has the following format:
# compute/v1/projects/<PROJECT_ID>}/regions/<REGION/6>/targetPools/<TARGET_POOL/8>
for forwarding_rule in forwarding_rules:
url = urlparse(forwarding_rule["target"])
splits = url.path.split("/")
region = splits[6]
target_pool_name = splits[8]
result.update({TargetPool(target_pool_name, region, []): forwarding_rule["name"]})
return result
def map_health_checks(project_id):
result = {}
for target_pool in target_pools(project_id):
for health_check in target_pool.health_checks:
result.update({health_check: target_pool.name})
return result
def health_checks(project_id):
heath_check_list = json.loads(execute(f"gcloud --project {project_id} compute http-health-checks list --format='json'"))
for health_check in heath_check_list:
if "name" in health_check:
yield health_check["name"]
# We'll cache these so we don't have to do it multiple times.
forwarding_rules = map_forwarding_rules(project_id)
mapped_health_checks = map_health_checks(project_id)
def delete_associated_forwarding_rule(project_id, target_pool):
if target_pool not in forwarding_rules:
# Looks like we don't know about an associated forwarding rule
print("did not find a forwarding rule for target pool %s (region %s)" % (target_pool.name, target_pool.region))
return
forwarding_rule_name = forwarding_rules[target_pool]
print("delete forwarding rule %s (associated with target pool %s)" % (forwarding_rule_name, target_pool.name))
print(execute("gcloud --quiet --project %s compute forwarding-rules delete --region %s %s" % (project_id, target_pool.region, forwarding_rule_name)))
def delete_target_pool(project_id, target_pool):
delete_associated_forwarding_rule(project_id, target_pool)
print(f"deleting target-pool {target_pool.name} in zone {target_pool.region}")
print(execute("gcloud --quiet --project %s compute target-pools delete --region %s %s" % (project_id, target_pool.region, target_pool.name)))
def delete_health_check(project_id, health_check):
print(f"deleting HTTP health check {health_check}...")
print(execute(f"gcloud --quiet --project {project_id} compute http-health-checks delete {health_check}"))
def delete_abandoned_target_pools(project_id):
for target_pool in target_pools(project_id):
if valid_target_pool(project_id, target_pool):
print("target pool %s (region %s) is valid" % (target_pool.name, target_pool.region))
else:
print("target pool %s (region %s) is not valid... deleting" % (target_pool.name, target_pool.region))
delete_target_pool(project_id, target_pool)
# delete_abandoned_health_checks looks for all the HTTP health checks that
# don't have an associated target pool. Any it finds, it deletes.
def delete_abandoned_health_checks(project_id):
for health_check in health_checks(project_id):
if health_check not in mapped_health_checks:
delete_health_check(project_id, health_check)
def main():
delete_abandoned_target_pools(project_id)
delete_abandoned_health_checks(project_id)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a7357387bb3adfafb8532fab01ed308d44c6ac8f | bda7570bb01ade12f60fec9433795a288dc0de63 | /PreetyPrint.py | a25019d1e6f3825ddc413437d21a57dad1dec991 | [] | no_license | nayana8/Prep1 | e4639decc0218d0026603c084ffca2a11f4f0d7e | 0d3e79202de069100fdf73182ca2ddddb663e606 | refs/heads/master | 2020-03-18T01:06:37.827422 | 2018-06-28T06:15:00 | 2018-06-28T06:15:00 | 134,127,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | class Solution:
# @param A : integer
# @return a list of list of integers
def prettyPrint(self, A):
if A == 0:
return []
n = (A * 2) - 1
last = n - 1
level = A
matrix = [[0 for i in range(0,n)] for j in range(0, n)]
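        # Fill the matrix as concentric square rings: the outermost ring keeps the
        # value A, the next ring A-1, and so on down to 1 at the centre.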
for k in range(0, level):
for i in range(k, n):
for j in range(i, n):
matrix[i][j] = A
matrix[j][i] = A
matrix[last][j] = A
matrix[j][last] = A
A = A - 1
last = last - 1
n = n - 1
return matrix | [
"[email protected]"
] | |
039335e1bd05152988f56c87f8fe116621bfa91a | 8aeefa27b94bf02f79cc73b7c030d1a7eaa76f53 | /myApp/migrations/0007_auto_20210930_0937.py | 20276d88ac326cd1e65f350660c9d9d0fcba9f84 | [] | no_license | Prasadchaskar/InterviewDashboard | e99d63ec65edfafcb76c2d5512f25b9c3ec5b0eb | aa3b53e2b6125dbbd6bd114caa00a399ba3604ef | refs/heads/main | 2023-08-17T13:11:35.183670 | 2021-10-14T08:49:05 | 2021-10-14T08:49:05 | 402,381,063 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | # Generated by Django 3.2 on 2021-09-30 04:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myApp', '0006_auto_20210927_1530'),
]
operations = [
migrations.AlterField(
model_name='candidate',
name='company',
field=models.CharField(choices=[('Uptricks', 'Uptricks'), ('Kukbit', 'Kukbit'), ('Skillbit', 'Skillbit'), ('Learntricks', 'Learntricks'), ('Challengekatta', 'Challengekatta'), ('Happieloop', 'Happieloop'), ('Internshipmela', 'Internshipmela')], max_length=50),
),
migrations.AlterField(
model_name='candidate',
name='post',
field=models.CharField(choices=[('Internship', 'Internship'), ('Job', 'Job')], max_length=50),
),
migrations.AlterField(
model_name='candidate',
name='technology',
field=models.CharField(choices=[('Video Editor Animation', 'Video Editor Animation'), ('Android Development', 'Android Development'), ('Game Development', 'Game Development'), ('Graphics Designing', 'Graphics Designing'), ('Software Testing', 'Software Testing'), ('Manual Testing', 'Manual Testing'), ('Full-stack Development', 'Full-stack Development'), ('Human Resource', 'Human Resource'), ('Digital Marketing', 'Digital Marketing'), ('Wordpress Development', 'Wordpress Development'), ('Web Auditor', 'Web Auditor'), ('Web developer', 'Web developer'), ('Business development executive', 'Business development executive'), ('Machine learning', 'Machine learning'), ('Machine learning', 'Machine learning'), ('AWS', 'AWS')], max_length=50),
),
]
| [
"chaskarprasad2000.com"
] | chaskarprasad2000.com |
b49b7e922640e6c806235e56eda6f345abd4b005 | 3490dbcd3820c6c1745cc3efcf05d14bcb6b8448 | /todo/test_forms.py | 7f138d00d09f67d38632f7cb585c4dacac88d412 | [] | no_license | NgiapPuoyKoh/fs-hello-django | 0dc2f41a9999b945589eaf13a58f5a9618a93b10 | 3a438c76c07b22aa6be3eed27e5d6dd2e24a7b04 | refs/heads/master | 2023-06-02T11:06:30.445732 | 2021-06-15T17:34:30 | 2021-06-15T17:34:30 | 305,090,727 | 0 | 0 | null | 2021-05-21T09:11:30 | 2020-10-18T11:55:30 | Python | UTF-8 | Python | false | false | 654 | py | from django.test import TestCase
from .forms import ItemForm
# Create your tests here.
class TestItemForm(TestCase):
def test_item_name_is_required(self):
form = ItemForm({'name': ''})
self.assertFalse(form.is_valid())
self.assertIn('name', form.errors.keys())
self.assertEqual(form.errors['name'][0], 'This field is required.')
def test_done_field_is_not_required(self):
form = ItemForm({'name': 'Test Todo Item'})
self.assertTrue(form.is_valid())
def test_fields_are_explicit_in_form_metaclass(self):
form = ItemForm()
self.assertEqual(form.Meta.fields, ['name', 'done']) | [
"[email protected]"
] | |
4b7488fe8381ea5e6817663563d6415fae03e8d5 | be503c37c064fedc7210696fc5f1ce1666a8ed49 | /tools/compute_joint_ll.py | 635c25ede8cab65fe10585518c0c60fe1007f283 | [] | no_license | Anantha-Ravi-Kiran/learning-correlated-topic-modelling | 87aaf43d6327fc1b974f58805bf8a9f0a7e47c19 | 3dbc3e86f9a1e3e992ec687503c6ac3924e63db7 | refs/heads/master | 2021-01-13T02:07:40.887944 | 2013-11-11T20:05:47 | 2013-11-11T20:05:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | import sys
import numpy as np
inp_file = sys.argv[1]
with open(inp_file) as f:
lines = f.read().splitlines()
lines = lines[:len(lines)-1]
lines = [float(x) for x in lines]
lines = np.array(lines)
print lines.sum()/lines.shape[0]
| [
"[email protected]"
] | |
e95171929db5a433b0db41a29a5797ad04f6ae0a | 0f4d9bc794d1b2b87c4b607af735a2819435f991 | /TodoApp/Resources/TagResources.py | 4cd0139a25ec28bcfcb748a3d625b36027d29cc5 | [] | no_license | gamesbrainiac/Pony-Todo-API | daf592461b6021ce4b0b47bda85f6b66968e7c1d | 004bcf5807d712571b021213fe5b8823ac847734 | refs/heads/master | 2016-09-09T17:06:13.982176 | 2014-10-15T12:53:02 | 2014-10-15T12:53:02 | 24,995,619 | 8 | 1 | null | 2014-10-10T01:30:57 | 2014-10-09T15:30:56 | Python | UTF-8 | Python | false | false | 932 | py | # encoding=utf-8
from TodoApp.Models.User import User
__author__ = "Quazi Nafiul Islam"
import flask_restful as rest
from flask import g
from pony import orm
from TodoApp.Models.Tag import Tag
class Tags(rest.Resource):
def get(self):
"""Will show you all tags"""
with orm.db_session:
return {
tag.name: tag.url
for tag in User[g.user].tags
}
class TagItem(rest.Resource):
def get(self, tag_id):
"""
Will show you information about a specific tag
:param tag_id: ID for the tag
:type tag_id: int
"""
try:
with orm.db_session:
tag = Tag[tag_id]
todos = list(tag.todos.data)
return {
"tag": tag.name,
"tasks": todos
}
except orm.ObjectNotFound:
return {}, 404 | [
"[email protected]"
] | |
06368bd9b6e0d22c1ed9cc5152bc6551053d776e | 1b629efe07b2ca138dcfe409320dc1a0b1f89441 | /analysis/doubt_count.py | bd527beeaaa2ccc96ea2e3d3e560794a34e6eed1 | [] | no_license | zhenhuaplus/svet-wrapper | f88c7939fb14c1c4d90dbe5db8ba6d452d1aed5e | 01faa68c7b81297c09610e5ee27db908033f0d3b | refs/heads/main | 2023-06-04T06:59:09.040485 | 2021-06-12T06:17:29 | 2021-06-12T06:17:29 | 329,802,303 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,985 | py | import pandas as pd
import numpy as np
from plotly import graph_objs as go
from plotly.subplots import make_subplots
from vc_wrap import SvetObject
def run_double_count(iso_name, tariff):
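    # Build a set of StorageVET cases for one ISO/tariff pair and collect each
    # case's NPV breakdown by value stream into the `results` DataFrame.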
# Initialize results
Finance_customer_tariff_filename = "/Users/zhenhua/Desktop/price_data/tariff_data_fake/{}.csv".format(
tariff)
Scenario_time_series_filename = "/Users/zhenhua/Desktop/price_data/hourly_timeseries_{}_2019_200x.csv".format(
iso_name)
results = pd.DataFrame(columns=["Case #", "DA ETS", "SR", "NSR",
"Avoided Demand", "Avoided Energy", "Capex", "O&M Cost", "NPV"])
# Case 0a: use retail rates for RS
case0a = SvetObject(SVet_absolute_path="/Applications/storagevet2v101/StorageVET-master-git/",
default_params_file="Model_Parameters_2v1-0-2_default_03-2021.csv",
shortname="{} RS on".format(iso_name),
description="{} 200x".format(iso_name),
Scenario_n="36",
Finance_npv_discount_rate="7",
Scenario_time_series_filename=Scenario_time_series_filename,
Finance_customer_tariff_filename=Finance_customer_tariff_filename,
DCM_active='no', retailTimeShift_active='yes', DA_active='no',
SR_active='no', NSR_active='no', FR_active="no", FR_CombinedMarket="1")
case0a.run_storagevet()
results = results.append({"Case #": "0 - RS",
"Avoided Demand": case0a.npv_new["Avoided Demand Charge"][0],
"Avoided Energy": case0a.npv_new["Avoided Energy Charge"][0],
"Capex": case0a.npv_new["2MW-5hr Capital Cost"][0],
"O&M Cost": case0a.npv_new["2MW-5hr Fixed O&M Cost"][0],
"NPV": case0a.npv_new["Lifetime Present Value"][0]}, ignore_index=True)
# Case 0b: use retail rates for RS+DCM
case0b = SvetObject(SVet_absolute_path="/Applications/storagevet2v101/StorageVET-master-git/",
default_params_file="Model_Parameters_2v1-0-2_default_03-2021.csv",
shortname="{} RS+DCM on".format(iso_name),
description="{} 200x".format(iso_name),
Scenario_n="36",
Finance_npv_discount_rate="7",
Scenario_time_series_filename=Scenario_time_series_filename,
Finance_customer_tariff_filename=Finance_customer_tariff_filename,
DCM_active='yes', retailTimeShift_active='yes', DA_active='no',
SR_active='no', NSR_active='no', FR_active="no", FR_CombinedMarket="1")
case0b.run_storagevet()
results = results.append({"Case #": "0 - RS+DCM",
"Avoided Demand": case0b.npv_new["Avoided Demand Charge"][0],
"Avoided Energy": case0b.npv_new["Avoided Energy Charge"][0],
"Capex": case0b.npv_new["2MW-5hr Capital Cost"][0],
"O&M Cost": case0b.npv_new["2MW-5hr Fixed O&M Cost"][0],
"NPV": case0b.npv_new["Lifetime Present Value"][0]}, ignore_index=True)
# Case 0c: use DA rates for wholesale participation only
case0c = SvetObject(SVet_absolute_path="/Applications/storagevet2v101/StorageVET-master-git/",
default_params_file="Model_Parameters_2v1-0-2_default_03-2021.csv",
shortname="{} DA+SR on".format(iso_name),
description="{} 200x".format(iso_name),
Scenario_n="36",
Finance_npv_discount_rate="7",
Scenario_time_series_filename=Scenario_time_series_filename,
Finance_customer_tariff_filename=Finance_customer_tariff_filename,
DCM_active='no', retailTimeShift_active='no', DA_active='yes',
SR_active='yes', NSR_active='no', FR_active="no", FR_CombinedMarket="1")
case0c.run_storagevet()
results = results.append({"Case #": "0 - DA+SR",
"DA ETS": case0c.npv_new["DA ETS"][0],
"SR": case0c.npv_new["Spinning Reserves"][0],
"Capex": case0c.npv_new["2MW-5hr Capital Cost"][0],
"O&M Cost": case0c.npv_new["2MW-5hr Fixed O&M Cost"][0],
"NPV": case0c.npv_new["Lifetime Present Value"][0]}, ignore_index=True)
case1b = SvetObject(SVet_absolute_path="/Applications/storagevet2v101/StorageVET-master-git/",
default_params_file="Model_Parameters_2v1-0-2_default_03-2021.csv",
shortname="{} DA+SR+DCM on".format(iso_name),
description="{} 200x".format(iso_name),
Scenario_n="36",
Finance_npv_discount_rate="7",
Scenario_time_series_filename=Scenario_time_series_filename,
Finance_customer_tariff_filename=Finance_customer_tariff_filename,
DCM_active='yes', retailTimeShift_active='no', DA_active='yes',
SR_active='yes', NSR_active='no', FR_active="no", FR_CombinedMarket="1")
case1b.run_storagevet()
results = results.append({"Case #": "1 - use DA for DA+SR+DCM after double counting", "DA ETS": case1b.npv_new["DA ETS"][0],
"SR": case1b.npv_new["Spinning Reserves"][0],
"Avoided Demand": case1b.npv_new["Avoided Demand Charge"][0],
"Avoided Energy": case1b.npv_new["Avoided Energy Charge"][0],
"Capex": case1b.npv_new["2MW-5hr Capital Cost"][0],
"O&M Cost": case1b.npv_new["2MW-5hr Fixed O&M Cost"][0],
"NPV": case1b.npv_new["Lifetime Present Value"][0]}, ignore_index=True)
results = results.append({"Case #": "1 - use DA for DA+SR+DCM before double counting", "DA ETS": case1b.npv_new["DA ETS"][0],
"SR": case1b.npv_new["Spinning Reserves"][0],
"Avoided Demand": case1b.npv_new["Avoided Demand Charge"][0],
"Avoided Energy": 0,
"Capex": case1b.npv_new["2MW-5hr Capital Cost"][0],
"O&M Cost": case1b.npv_new["2MW-5hr Fixed O&M Cost"][0],
"NPV": case1b.npv_new["Lifetime Present Value"][0] -
case1b.npv_new["Avoided Energy Charge"][0]}, ignore_index=True)
results = results.append({"Case #": "1 - double count delta",
"NPV": case1b.npv_new["Avoided Energy Charge"][0]}, ignore_index=True)
case2a = SvetObject(SVet_absolute_path="/Applications/storagevet2v101/StorageVET-master-git/",
default_params_file="Model_Parameters_2v1-0-2_default_03-2021.csv",
shortname="{} RS+SR+DCM on".format(iso_name),
description="{} 200x".format(iso_name),
Scenario_n="36",
Finance_npv_discount_rate="7",
Scenario_time_series_filename=Scenario_time_series_filename,
Finance_customer_tariff_filename=Finance_customer_tariff_filename,
DCM_active='yes', retailTimeShift_active='yes', DA_active='no',
SR_active='yes', NSR_active='no', FR_active="no", FR_CombinedMarket="1")
case2a.run_storagevet()
results = results.append({"Case #": "2 - use RS for RS+SR+DCM before double counting",
"SR": case2a.npv_new["Spinning Reserves"][0],
"Avoided Demand": case2a.npv_new["Avoided Demand Charge"][0],
"Avoided Energy": case2a.npv_new["Avoided Energy Charge"][0],
"Capex": case2a.npv_new["2MW-5hr Capital Cost"][0],
"O&M Cost": case2a.npv_new["2MW-5hr Fixed O&M Cost"][0],
"NPV": case2a.npv_new["Lifetime Present Value"][0]}, ignore_index=True)
# TODO
case2a_ts_results = pd.read_csv(case2a.runID_dispatch_timeseries_path)
da_ets_corrected_yearly = np.dot(case2a.initial_hourly_timeseries["DA Price ($/kWh)"],
case2a_ts_results["Load (kW)"]) - \
np.dot(case2a.initial_hourly_timeseries["DA Price ($/kWh)"],
case2a_ts_results["Net Load (kW)"])
da_ets_corrected_npv_list = []
for i in range(0, 15):
da_ets_corrected_npv_list.append(da_ets_corrected_yearly * (1 + 0.03) ** i)
da_ets_corrected_npv_list = [0] + da_ets_corrected_npv_list
da_ets_corrected_npv = np.npv(0.07, da_ets_corrected_npv_list)
results = results.append({"Case #": "2 - use RS for RS+SR+DCM after double counting",
"DA ETS": da_ets_corrected_npv,
"SR": case2a.npv_new["Spinning Reserves"][0],
"Avoided Demand": case2a.npv_new["Avoided Demand Charge"][0],
"Avoided Energy": case2a.npv_new["Avoided Energy Charge"][0],
"Capex": case2a.npv_new["2MW-5hr Capital Cost"][0],
"O&M Cost": case2a.npv_new["2MW-5hr Fixed O&M Cost"][0],
"NPV": case2a.npv_new["Lifetime Present Value"][0] + da_ets_corrected_npv},
ignore_index=True)
results = results.append({"Case #": "2 - double count delta",
"NPV": da_ets_corrected_npv},
ignore_index=True)
case3 = SvetObject(SVet_absolute_path="/Applications/storagevet2v101/StorageVET-master-git/",
default_params_file="Model_Parameters_2v1-0-2_default_03-2021.csv",
shortname="{} DA+RS+DCM+SR on".format(iso_name),
description="{} 200x".format(iso_name),
Scenario_n="36",
Finance_npv_discount_rate="7",
Scenario_time_series_filename=Scenario_time_series_filename,
Finance_customer_tariff_filename=Finance_customer_tariff_filename,
DCM_active='yes', retailTimeShift_active='yes', DA_active='yes',
SR_active='yes', NSR_active='no', FR_active="no", FR_CombinedMarket="1")
case3.run_storagevet()
results = results.append({"Case #": "3 - use DA and RS for DA+SR+RS+DCM",
"DA ETS": case3.npv_new["DA ETS"][0],
"SR": case3.npv_new["Spinning Reserves"][0],
"Avoided Demand": case3.npv_new["Avoided Demand Charge"][0],
"Avoided Energy": case3.npv_new["Avoided Energy Charge"][0],
"Capex": case3.npv_new["2MW-5hr Capital Cost"][0],
"O&M Cost": case3.npv_new["2MW-5hr Fixed O&M Cost"][0],
"NPV": case3.npv_new["Lifetime Present Value"][0]}, ignore_index=True)
results.sort_values(by="Case #").reset_index(drop=True)\
.to_csv("/Users/zhenhua/Desktop/double_count_results_0410/{}_{}.csv".format(iso_name, tariff))
# Plot prices & results
case1b_ts_results = pd.read_csv(case1b.runID_dispatch_timeseries_path)
case2a_ts_results = pd.read_csv(case2a.runID_dispatch_timeseries_path)
case1b_ts_results["date"] = pd.to_datetime(case1b_ts_results["Start Datetime (hb)"]).dt.date
case1b_ts_results["hour (hb)"] = pd.to_datetime(case1b_ts_results["Start Datetime (hb)"]).dt.hour
case2a_ts_results["date"] = pd.to_datetime(case2a_ts_results["Start Datetime (hb)"]).dt.date
case2a_ts_results["hour (hb)"] = pd.to_datetime(case2a_ts_results["Start Datetime (hb)"]).dt.hour
fig = make_subplots(rows=2, cols=2,
subplot_titles=("DA and retail", "SR",
"DA as signal, RS to double count",
"RS as signal, DA to double count"))
for date in set(case1b_ts_results["date"]):
data = case1b_ts_results[case1b_ts_results["date"] == date].reset_index()
fig.add_trace(go.Scatter(x=data["hour (hb)"], y=data["DA Price Signal ($/kWh)"], line=dict(color='blue'),
opacity=0.2, name=str(date)), row=1, col=1)
fig.add_trace(go.Scatter(x=data["hour (hb)"], y=data["SR Price Signal ($/kW)"], line=dict(color='green'),
opacity=0.5, name=str(date)), row=1, col=2)
fig.add_trace(go.Scatter(x=data["hour (hb)"], y=data["2MW-5hr Power (kW)"], line=dict(color='blue'),
opacity=0.2, name=str(date)), row=2, col=1)
data2 = case2a_ts_results[case2a_ts_results["date"] == date].reset_index()
fig.add_trace(go.Scatter(x=data2["hour (hb)"], y=data2["Energy Price ($/kWh)"], line=dict(color='red'),
opacity=0.5, name=str(date)), row=1, col=1)
fig.add_trace(go.Scatter(x=data2["hour (hb)"], y=data2["2MW-5hr Power (kW)"], line=dict(color='blue'),
opacity=0.2, name=str(date)), row=2, col=2)
fig.update_layout(title="{}_{}".format(iso_name, tariff))
return results, fig
# results, fig = run_double_count(iso_name="caiso", tariff="peak4-14-18")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak4-15-19")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak4-16-20")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak4-17-21")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak4-18-22")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak4-19-23")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak4-20-24")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak12-18")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak13-19")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak14-20")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak15-21")
# fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak16-22")
# fig.show()
results, fig = run_double_count(iso_name="caiso", tariff="peak17-23")
fig.show()
# results, fig = run_double_count(iso_name="caiso", tariff="peak18-24")
# fig.show()
| [
"[email protected]"
] | |
bf8204844134d11fb338976ce96378ae2042d6fb | a4fc0dd8dcda38de3010fac6b851e6cca3b6ee4e | /tests/test_utils.py | c10f1463db2e716a3bcff170bd455e2783e62a50 | [
"MIT"
] | permissive | Leticia-maria/funsies | eaaf72bc5a96bc59530cd3cbe99d60fff9834d71 | 12656f49420e9062edb9dd4c34aa18bcc94880f1 | refs/heads/master | 2023-08-22T02:07:58.719514 | 2021-10-02T18:17:47 | 2021-10-02T18:17:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,306 | py | """Test of Funsies utility functions."""
# std
from typing import List
# external
import pytest
# funsies
from funsies import errors, Fun, morph, options, put, take, utils
from funsies._context import get_connection
from funsies._run import run_op
from funsies.config import MockServer
from funsies.types import Error, ErrorKind, UnwrapError
def test_concat() -> None:
"""Test concatenation."""
with Fun(MockServer()):
db, store = get_connection()
dat1 = put(b"bla")
dat2 = put(b"bla")
cat = utils.concat(dat1, dat2)
run_op(db, store, cat.parent)
assert take(cat) == b"blabla"
cat = utils.concat(dat1, dat1, dat1, join=b" ")
run_op(db, store, cat.parent)
assert take(cat) == b"bla bla bla"
def test_match() -> None:
"""Test error matching."""
results = [b"bla bla", errors.Error(kind=errors.ErrorKind.NotFound)]
assert utils.match_results(results, lambda x: x) == [b"bla bla"]
def unity(x: bytes) -> bytes:
return x
def err(x: errors.Error) -> errors.ErrorKind:
return x.kind
results2: List[errors.Result[bytes]] = [
b"bla bla",
errors.Error(kind=errors.ErrorKind.NotFound),
]
assert utils.match_results(results2, unity, err) == [
b"bla bla",
errors.ErrorKind.NotFound,
]
def test_truncate() -> None:
"""Test truncation."""
with Fun(MockServer()):
db, store = get_connection()
inp = "\n".join([f"{k}" for k in range(10)])
dat1 = put(inp.encode())
trunc = utils.truncate(dat1, 2, 3)
run_op(db, store, trunc.parent)
assert take(trunc) == ("\n".join(inp.split("\n")[2:-3])).encode()
def test_exec_all() -> None:
"""Test execute_all."""
with Fun(MockServer(), defaults=options(distributed=False)):
results = []
def div_by(x: float) -> float:
return 10.0 / x
for i in range(10, -1, -1):
val = put(float(i))
results += [morph(div_by, val)]
with pytest.raises(UnwrapError):
take(results[0])
err = utils.execute_all(results)
print(take(results[0]))
v = take(err, strict=False)
assert isinstance(v, Error)
assert v.kind == ErrorKind.ExceptionRaised
| [
"[email protected]"
] | |
73cd0d36dba5121d488942cdf8f5fb0037d4ca18 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v6_0_2f/interface/hundredgigabitethernet/switchport/access/rspan_access/__init__.py | 0c094df735e4ba5e000dd99a7d7d0802011fc9d7 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,447 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class rspan_access(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/hundredgigabitethernet/switchport/access/rspan-access. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The access layer characteristics of this
interface.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__rspan_access_vlan',)
_yang_name = 'rspan-access'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__rspan_access_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="rspan-access-vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify rspan-vlan id to set as access vlan', u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'hundredgigabitethernet', u'switchport', u'access', u'rspan-access']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'HundredGigabitEthernet', u'switchport', u'access']
def _get_rspan_access_vlan(self):
"""
Getter method for rspan_access_vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access/rspan_access/rspan_access_vlan (vlan-type)
YANG Description: Specify rspan-vlan id to set as access vlan
"""
return self.__rspan_access_vlan
def _set_rspan_access_vlan(self, v, load=False):
"""
Setter method for rspan_access_vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access/rspan_access/rspan_access_vlan (vlan-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_rspan_access_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rspan_access_vlan() directly.
YANG Description: Specify rspan-vlan id to set as access vlan
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="rspan-access-vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify rspan-vlan id to set as access vlan', u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rspan_access_vlan must be of a type compatible with vlan-type""",
'defined-type': "brocade-interface:vlan-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="rspan-access-vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify rspan-vlan id to set as access vlan', u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)""",
})
self.__rspan_access_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_rspan_access_vlan(self):
self.__rspan_access_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="rspan-access-vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify rspan-vlan id to set as access vlan', u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='vlan-type', is_config=True)
rspan_access_vlan = __builtin__.property(_get_rspan_access_vlan, _set_rspan_access_vlan)
_pyangbind_elements = {'rspan_access_vlan': rspan_access_vlan, }
| [
"[email protected]"
] | |
45a08f7682ead31dfeed72ac54a34dd40082e946 | de26d82255c883f197797b2e7d5981b6021f4fd4 | /rewrite/pst.py | bd6cc61c9fb276bdabd3e5d68b8a4d795b77ab98 | [] | no_license | hofmannedv/python-network-traffic | 212022cba54319a3ebeb66c1374713a6b63ea4c5 | e46b83d9a70f24c56b96971156367c74b2200123 | refs/heads/master | 2023-02-06T12:29:20.889459 | 2020-12-29T11:10:58 | 2020-12-29T11:10:58 | 324,977,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | import pyshark
import time
import re
# define interface
networkInterface = "enp0s3"
# define capture object
capture = pyshark.LiveCapture(interface=networkInterface)
print("listening on %s" % networkInterface)
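# Note: pyshark's LiveCapture shells out to tshark, so Wireshark/tshark must be installed
# and the process needs capture privileges (e.g. run as root); "enp0s3" is machine-specific.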
# scan for five network packages
# print(" ")
# print("Scan for 5 packets")
#
# for pkt in capture.sniff_continuously(packet_count=5):
# # default output
# print(pkt)
# scan for five network packages and display header + content
print(" ")
print("Scan for 10 packages for being TCP, UDP or IPv4 packets")
for pkt in capture.sniff_continuously(packet_count=10):
# adjusted output
try:
# get timestamp
localtime = time.asctime(time.localtime(time.time()))
# get packet content
protocol = pkt.transport_layer
src_addr = pkt.ip.src
src_port = pkt[protocol].srcport
dst_addr = pkt.ip.dst
dst_port = pkt[protocol].dstport
flags = ""
# output packet info
print ("%s IP %s:%s <-> %s:%s (%s): Flags: %s" % (localtime, src_addr, src_port, dst_addr, dst_port, protocol, flags))
# output packet data
print ("data:")
payload = pkt.tcp.payload
payloadEntries = payload.split(":")
position = 10
n = m = 0
while n < len(payloadEntries):
m = m + 16
positionString = "%04d" % position
dataString = " ".join(payloadEntries[n:m])
# prepare ascii output
asciibasis = dataString.replace(" ","")
asciiString = bytearray.fromhex(asciibasis).decode('latin-1')
pattern3 = re.compile("[^a-z0-9]", re.IGNORECASE)
asciiString = re.sub(pattern3, ".", asciiString)
# combine 2x2 letters
pattern1 = re.compile("([a-z0-9]{2})\s([a-z0-9]{2})")
pattern2 = r"\1\2"
dataString = re.sub(pattern1, pattern2, dataString)
# make sure the string is exactly 40 characters
dataString = dataString.ljust(40)
print ("0x%s: %s %s" % (positionString, dataString, asciiString))
n = m
position = position + 10
except AttributeError as e:
# # ignore packets other than TCP, UDP and IPv4
pass
print (" ")
| [
"[email protected]"
] | |
31afe073ff484dc3ba5a2e142d00ca193ef49026 | e879205ec5f82d73b480594ba7cc58b68b13af7f | /hello_world.py | 697cf9a32ba20154116835c67d9b11010a19508c | [] | no_license | BoraMutluoglu/Python-Projects | 51456386893bc40838c74899db0a401288c48747 | a6076a0f80b3599338b4bd96c644e25028846b26 | refs/heads/master | 2020-03-08T06:59:24.462668 | 2019-06-17T18:46:38 | 2019-06-17T18:46:38 | 127,984,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | #! /usr/bin/env python3
print("hello world")
| [
"[email protected]"
] | |
7ad94a15b350c206a2363e9acbdfee1d65f47637 | 7f8f307b76e8f429c9ffd72f9e5643720afcc955 | /importbidang/import_bidang.py | 03b46c1c1d8bcc09bed19ffa96c51da02f1a9e3f | [] | no_license | imseptin/PBB2 | 01ff59f761b1b35686316f12330fa50b1cb32ae8 | 4425cd111ec37e9aaacf8ab86a93aff44d88443b | refs/heads/master | 2020-09-24T21:12:39.113779 | 2017-02-10T01:11:02 | 2017-02-10T01:11:02 | 67,618,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,846 | py | # -*- coding: utf-8 -*-
"""
/***************************************************************************
ImportBidang
A QGIS plugin
Import bidang ke Basisdata Bidang
-------------------
begin : 2017-02-08
git sha : $Format:%H$
copyright : (C) 2017 by Septin Mulatsih Rezki
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from PyQt4.QtGui import QAction, QIcon
# Initialize Qt resources from file resources.py
import resources
# Import the code for the dialog
from import_bidang_dialog import ImportBidangDialog
from qgis.core import QgsMapLayer
import os.path
import psycopg2 #to connect postgres db
class ImportBidang:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'ImportBidang_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = ImportBidangDialog()
# connect slot
self.dlg.cboLayer.currentIndexChanged.connect(self.index_changed)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Import Bidang')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'ImportBidang')
self.toolbar.setObjectName(u'ImportBidang')
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('ImportBidang', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/ImportBidang/icon.png'
self.add_action(
icon_path,
text=self.tr(u'Import Bidang'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Import Bidang'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def daftar_layer(self):
"""Function to get layer list from table of content
:return: list of layer
"""
daftar_layer = []
for layer in self.iface.mapCanvas().layers():
daftar_layer.append(layer)
return daftar_layer
def daftar_kolom(self, layer):
"""Function to get fields list of a layer
:param layer:
:return:
"""
self.dlg.cboField.clear()
if layer.type() == QgsMapLayer.VectorLayer:
layer_fields = layer.pendingFields()
for field in layer_fields:
self.dlg.cboField.addItem(field.name(), field)
def index_changed(self):
"""Mengakomodir perubahan layer terpilih terhadap daftar field yang akan ditampilkan"""
current_index = self.dlg.cboLayer.currentIndex()
layer = self.dlg.cboLayer.itemData(current_index)
self.daftar_kolom(layer)
def run(self):
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
# Run the dialog event loop
self.dlg.cboLayer.clear()
daftar_layer = self.daftar_layer()
for layer in daftar_layer:
self.dlg.cboLayer.addItem(layer.name(), layer)
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
# substitute with your code.
selectedLayerIndex = self.dlg.cboLayer.currentIndex()
selectedLayer = self.iface.mapCanvas().layers()[selectedLayerIndex]
#selectedLayer.setCrs(QgsCoordinateReferenceSystem(32750))
#fields = selectedLayer.pendingFields()
fieldname = str(self.dlg.cboField.currentText())
for feature in selectedLayer.getFeatures():
idx = selectedLayer.fieldNameIndex(fieldname)
nop = feature.attributes()[idx]
geom = feature.geometry()
geom_wkt = geom.exportToWkt()
#multipolygon = "MULTIPOLYGON((("
geom_wkt_str = geom_wkt[10:-2]
#st_geom = """ST_GeomFromText('"""
#srid = """)', 32750)"""
#geom_wkb_postgis = st_geom + multipolygon +geom_wkt_str + srid
#wkb version, just return geometry, doesn't include SRID
#geom_wkb = geom.asWkb()
#geom_wkb_postgis = geom_wkb.encode('hex')
query1 = '''INSERT INTO gis.tm_bidang3(d_nop,geom) VALUES (%s, ST_GeomFromText(%s, 32750));'''
#query = """INSERT INTO gis.tm_bidang2(d_nop,geom) VALUES (%s, ST_GeomFromText('MULTIPOLYGON(((%s)))', 32750));"""
data = [nop, geom_wkt]
#Parameter Connection to database
host_name = "localhost"
port_name = "5433"
db_name = "db_pbb"
user_name = "postgres"
user_pass = "septin"
#Connection
conn = psycopg2.connect("user='%s' password='%s' host='%s' port='%s' dbname='%s'" %
(user_name, user_pass, host_name, port_name, db_name))
cur = conn.cursor()
cur.execute(query1, data)
conn.commit()
cur.close()
conn.close()
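                # Note: a new database connection is opened and closed for every feature;
                # for large layers it would be cheaper to connect once before the loop
                # and commit/close once after it.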
| [
"[email protected]"
] | |
286843621ff8a3018d6028de68ef2bedb69f2c42 | 8b76adbe7ae0e66f8954e64da606d472833c8d74 | /web/gift.py | c7583a422ab179a09e152c0ad49b6b575daaecb6 | [] | no_license | MattHony/YuShu_ | b737756144a28293f75a3753d1abc37f89ee49ad | 19519288f4580bcc7b6b0b28473c8c6aad36e814 | refs/heads/master | 2020-03-25T02:44:37.804757 | 2018-08-02T13:55:28 | 2018-08-02T13:55:28 | 143,305,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/8/2 21:08
# @Author : '红文'
# @File : gift.py
# @Software: PyCharm
from . import web
@web.route('/my/gifts')
def my_gifts():
pass
@web.route('/gifts/book/<isbn>')
def save_to_gifts(isbn):
pass
@web.route('/gifts/<gid>/redraw')
def redraw_from_gifts(gid):
pass | [
"[email protected]"
] | |
90567629b60811dafa7155c58ca4f08b0ee9b164 | c763082f876196ae49fb47c2dd1e21aa9d5245e3 | /properscoring/_energy_score.py | b1326b663fdd5021f91d4c68ac3c18102da449a2 | [
"Apache-2.0"
] | permissive | tozech/properscoring | 9e4192f1806aa3cd6fa13306c28cd4c72e955073 | bf940c16a738cbbe69c9e65d2cc9655ff50eda70 | refs/heads/master | 2021-07-14T05:01:11.088138 | 2020-11-03T09:34:24 | 2020-11-03T09:34:24 | 221,425,181 | 0 | 0 | Apache-2.0 | 2019-11-13T09:46:48 | 2019-11-13T09:46:47 | null | UTF-8 | Python | false | false | 3,009 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 21:16:18 2020
@author: tzech
"""
import numpy as np
from ._utils import suppress_warnings
try:
from properscoring._gufuncs import _energy_score_gufunc
except ImportError as exc:
def _make_import_error(a):
raise ImportError('Numba is not installed.')
_energy_score_gufunc = lambda x: _make_import_error(x)
# TODO: refactor energy_score to energy_score_vectorized and add numba version
def energy_score(observations, forecasts, weights=None, issorted=False,
axis=-2, feature_axis=-1):
"""computes the energy score
Parameters
----------
observations : np.ndarray
2-dim (samples, features)
forecasts : np.ndarray
3-dim (samples, members, features)
weights : np.ndarray, optional
2-dim (samples, members)
issorted : bool, optional
axis : int, optional
feature_axis : int, optional
Returns
-------
np.ndarray
1-dim (samples) energy score
References
----------
Tilmann Gneiting & Adrian E Raftery (2007) Strictly Proper Scoring Rules,
Prediction, and Estimation, Journal of the American Statistical Association,
102:477, 359-378, DOI: 10.1198/016214506000001437
"""
if issorted:
raise NotImplementedError
if axis != -2:
raise NotImplementedError
if feature_axis != -1:
raise NotImplementedError
observations = np.asarray(observations)
forecasts = np.asarray(forecasts)
weights = np.asarray(weights)
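    # np.asarray(None) yields a 0-d object array, so the default weights=None falls
    # through to the equal-weights branch below.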
if weights.ndim > 0:
forecasts_nan = np.all(~np.isnan(forecasts), axis=-1)
weights = np.where(forecasts_nan, weights, np.nan)
#Uses mean for NaN handling, requires mean in score = np.nanmean(... later on
weights = weights / np.nanmean(weights, axis=-1, keepdims=True)
else:
weights = np.ones(forecasts.shape[:-1])
weights = weights / np.nanmean(weights, axis=-1, keepdims=True)
if observations.ndim == forecasts.ndim - 1:
# sum over the last axis
# assert observations.shape == forecasts.shape[:-1] #TODO redo
observations = np.expand_dims(observations, axis=-2)
l2norm_resi = np.linalg.norm(forecasts - observations, axis=feature_axis)
with suppress_warnings('Mean of empty slice'):
score = np.nanmean(weights * l2norm_resi, axis=-1)
# insert new axes along last and second to last forecast dimensions so
# forecasts_diff expands with the array broadcasting
forecasts_diff = (np.expand_dims(forecasts, -2) -
np.expand_dims(forecasts, -3))
weights_matrix = (np.expand_dims(weights, -1) *
np.expand_dims(weights, -2))
l2norm_diff = np.linalg.norm(forecasts_diff, axis=feature_axis)
with suppress_warnings('Mean of empty slice'):
score += -0.5 * np.nanmean(weights_matrix * l2norm_diff, axis=(-2, -1))
return score
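# Minimal usage sketch (illustrative values): one sample, three ensemble members, two features.
#   obs = np.array([[0.1, 0.2]])                             # (samples, features)
#   fcst = np.array([[[0.0, 0.2], [0.1, 0.3], [0.2, 0.1]]])  # (samples, members, features)
#   energy_score(obs, fcst)                                  # -> array of shape (samples,)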
| [
"[email protected]"
] | |
730584abfa02ef3e2a7640c6b4961314107db4c5 | 605ef2110da81419772072f2e7b1007b50d04196 | /helptutor/services/api/service.py | d0501702d5ee1c167c67f94b90c8df96bf8658be | [] | no_license | kabutoblanco/helptutor_backend | daf441edbc2ee3ad2584e1d62b9195b8f2f4f673 | 47bd13382dce61327a7ae2f5fb85c883bbc99439 | refs/heads/master | 2023-04-15T10:09:11.414371 | 2021-04-28T22:00:13 | 2021-04-28T22:00:13 | 347,634,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | from rest_framework import generics, status, viewsets, mixins, response
from rest_framework.permissions import IsAuthenticated
from helptutor.users.models import Tutor
from helptutor.services.models import Service, Aggrement
from helptutor.services.serializers import *
from drf_yasg.utils import swagger_auto_schema
class ServiceAPIView(viewsets.ModelViewSet):
serializer_class = ServiceCreateSerializer
queryset = Service.objects.filter(is_active=True)
@swagger_auto_schema(
responses={status.HTTP_200_OK: ServiceModelSerializer}
)
def create(self, request, *args, **kwargs):
user = request.user.pk
tutor = Tutor.objects.get(user=user)
request.data['tutor'] = tutor.pk
        return super().create(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
instance.is_active = False
self.perform_update(instance)
Aggrement.objects.filter(service=instance.pk).update(is_active=False)
return response.Response(status=status.HTTP_200_OK)
class TutorServicesAPI(generics.ListAPIView):
serializer_class = ServiceModelSerializer
permission_classes = (IsAuthenticated, )
def get_queryset(self):
queryset = Service.objects.filter(tutor__user=self.request.user.pk, is_active=True)
return queryset
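# Illustrative URL wiring (assumed; the project's actual urls.py may differ):
#   router = routers.DefaultRouter()
#   router.register('services', ServiceAPIView, basename='service')
#   urlpatterns = [path('', include(router.urls)),
#                  path('tutor/services/', TutorServicesAPI.as_view())]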
| [
"[email protected]"
] | |
04bac3c4ab7b8005c1c6edd4eb2e71d0fea59c58 | dc6c0a0862487ce56c92f0d1e6df597e6fb0272c | /huikaka_API/TestCase_Lib/CustomerQrcode.py | 57a3d31645adeba2de9bef44351c2ecd40f3c75d | [] | no_license | youlong533/huikakaAPITest | 9d4a72e62141878fa32d62c588ebb1dd08202e8e | fb02f38dacda995ff537bfb84b194d87cbba9baf | refs/heads/master | 2020-05-19T19:22:40.876200 | 2019-05-09T08:27:34 | 2019-05-09T08:27:34 | 185,177,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from Api_request import Api_request
def customer_qrcode():
r= Api_request()
url = '/api/v1/customer/qrcode'
data = {
'type':''
}
re =r.get_re(data,url)
print(re.text)
return re.text
customer_qrcode() | [
"[email protected]"
] | |
592ecce508ee03af00fc02cf063594585424106d | 93c749ba37eb8b724c7ce81fec40c315917ced7a | /Exemplo-CSV-Write.py | ce3a79ecc104ec50666c8692e94178140a98d683 | [] | no_license | thiagokaiser/aprendendo-python | 7027d904045a0cf913114e8db9f787a59573e157 | 58602e88bbb9e1653d83d90af664f9687ecad907 | refs/heads/master | 2021-09-24T08:08:00.936799 | 2018-10-05T11:39:20 | 2018-10-05T11:39:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | import csv
lista = []
i = 0
while i <= 190:
i = i + 1
lista.append(i)
print(lista)
with open('c:/temp/testecsv.csv', 'w', newline='') as csvfile:
arquivo = csv.writer(csvfile, delimiter=';')
arquivo.writerow(lista)
| [
"[email protected]"
] | |
12b7ed869a00a01b95ee8fd217548779035c0c8a | c5458f2d53d02cb2967434122183ed064e1929f9 | /sdks/python/test/test_sigma_boolean.py | 717d38d33feb2b8df40cd39dfb9d39217f9f3642 | [] | no_license | ross-weir/ergo-node-api-sdks | fd7a32f79784dbd336ef6ddb9702b9dd9a964e75 | 9935ef703b14760854b24045c1307602b282c4fb | refs/heads/main | 2023-08-24T05:12:30.761145 | 2021-11-08T10:28:10 | 2021-11-08T10:28:10 | 425,785,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | """
Ergo Node API
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
The version of the OpenAPI document: 4.0.15
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.sigma_boolean import SigmaBoolean
class TestSigmaBoolean(unittest.TestCase):
"""SigmaBoolean unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSigmaBoolean(self):
"""Test SigmaBoolean"""
# FIXME: construct object with mandatory attributes with example values
# model = SigmaBoolean() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2c3d685965678dc6963636f6fe53de2d3ed78f5c | ddbe0cf338b65da7b9910cff658b3ccc8547e426 | /answer/ps0/ps0.py | 7a5ec0bac1beb14f8e630d854812d4440d7867ec | [] | no_license | guoguozy/Python | de455e2084a57a8bf8459a01b86670a6043a62f2 | 9bdef9305488f6d5897aaae6026f108a7365c545 | refs/heads/master | 2021-07-07T12:37:05.601156 | 2020-08-17T04:03:43 | 2020-08-17T04:03:43 | 175,205,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | """
Author : 郭梓煜, School of Data and Computer Science
Student ID : 17341046
mail : [email protected]
"""
import math
num1=input("Enter number x:")
num2=input("Enter number y:")
print("X**y = %s\nlog(x)=%s\n" % (int(num1)**int(num2),math.log(int(num1),2)))
| [
"[email protected]"
] | |
285d66bbd8ef0cd0e03fcceb627acbea8633dd73 | 983e4c777574e7cdf3181d21c00bb6913a6af77f | /src/chat/consumers.py | 21c5e861ec784805b175fde31d1a1518a60aaf6b | [] | no_license | ArunimaKhanna/django-channels | bd45fd6ff09c90a336ca6f5f07a3293a1e05d276 | fac7fd04d35ae218b975b96c128906ef27179bbd | refs/heads/master | 2022-07-20T01:19:37.510904 | 2020-05-20T17:06:39 | 2020-05-20T17:06:39 | 265,563,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,569 | py | # from channels.generic.websocket import WebsocketConsumer
# import json
# class ChatConsumer(WebsocketConsumer):
# def connect(self):
# self.accept()
# def disconnect(self, close_code):
# pass
# def receive(self, text_data):
# text_data_json = json.loads(text_data)
# message = text_data_json['message']
# self.send(text_data=json.dumps({
# 'message': message
# }))
# from asgiref.sync import async_to_sync
# from channels.generic.websocket import AsyncWebsocketConsumer
# import json
# class ChatConsumer(AsyncWebsocketConsumer):
# async def connect(self):
# self.room_name = self.scope['url_route']['kwargs']['room_name']
# self.room_group_name = 'chat_%s' % self.room_name
# # Join room group
# await self.channel_layer.group_add(
# self.room_group_name,
# self.channel_name
# )
# await self.accept()
# async def disconnect(self, close_code):
# # Leave room group
# await self.channel_layer.group_discard(
# self.room_group_name,
# self.channel_name
# )
# # Receive message from WebSocket
# async def receive(self, text_data):
# text_data_json = json.loads(text_data)
# message = text_data_json['message']
# # Send message to room group
# await self.channel_layer.group_send(
# self.room_group_name,
# {
# 'type': 'chat_message',
# 'message': message
# }
# )
# # Receive message from room group
# async def chat_message(self, event):
# message = event['message']
# # Send message to WebSocket
# await self.send(text_data=json.dumps({
# 'message': message
# }))
# for commenting or un commenting ctrl+/
from django.contrib.auth import get_user_model
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
import json
from .models import Message
# from ReconnectingWebSocket import 'reconnecting-websocket'
User = get_user_model()
class ChatConsumer(WebsocketConsumer):
def fetch_messages(self, data):
messages = Message.last_10_messages()
content = {
'command': 'messages',
'messages': self.messages_to_json(messages)
}
self.send_chat_message(content)
def new_message(self, data):
author = data['from']
author_user = User.objects.filter(username=author)[0]
message = Message.objects.create(
author=author_user,
content=data['message'])
content = {
'command': 'new_message',
'message': self.message_to_json(message)
}
return self.send_chat_message(content)
def messages_to_json(self, messages):
result = []
for message in messages:
result.append(self.message_to_json(message))
return result
def message_to_json(self, message):
return {
'author': message.author.username,
'content': message.content,
'timestamp': str(message.timestamp)
}
commands = {
'fetch_messages': fetch_messages,
'new_message': new_message
}
def connect(self):
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
self.accept()
def disconnect(self, close_code):
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
def receive(self, text_data):
data = json.loads(text_data)
self.commands[data['command']](self, data)
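        # Expected client payloads, as implied by the `commands` table above (illustrative):
        #   {"command": "fetch_messages"}
        #   {"command": "new_message", "from": "<username>", "message": "<text>"}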
def send_chat_message(self, message):
        async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type': 'chat_message',
'message': message
}
)
def send_message(self, message):
self.send(text_data=json.dumps(message))
def chat_message(self, event):
message = event['message']
self.send(text_data=json.dumps(message))
| [
"[email protected]"
] | |
500bc23b8f567673adf8a7087e0d61f3f10dfa48 | 674f2e52a127806305f62ecdbbf49dc74c49957e | /18.6.py | d72a5dfb5c872f546b71efbe3d6911b0f493938d | [] | no_license | Dashylikkkkk/pythonProject3 | 14365fa1d52d61870e5b3e8c6f342293aec3ac8d | 0a816b77f35b02d67b6fc40f9407808c79bcdab2 | refs/heads/master | 2023-05-13T06:08:37.629703 | 2021-05-29T12:14:52 | 2021-05-29T12:14:52 | 367,468,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | def line(s, t):
if float(t.split(';')[1]) == float(s.split('x')[0]) * float(t.split(';')[0]) + float(s.split('x')[1]):
print(True)
else:
print(False)
| [
"[email protected]"
] | |
41cab2ce5452c1cd30e28a37027569807a11c4a5 | 77fde9e697346dc178f4a84be0fbe948f542d616 | /hello_django/hello_django/settings.py | 3cd0bfc1feb193be04d8a2591c966ab2b43f868d | [] | no_license | ferreirathiago/hello_django | 0c0846c6efc2f211376a8351a00159881000dec0 | 8d5ada841176cc51be8272851b4e85ae386fd091 | refs/heads/master | 2020-12-08T19:38:35.373314 | 2020-01-10T15:38:11 | 2020-01-10T15:38:11 | 233,076,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,117 | py | """
Django settings for hello_django project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fxe$vjx96dtp)7%la(fc%e@g@$f$!dmvc3=xl+oiqmsygug@-2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hello_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hello_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
afb6b1fd48b34dd36ee93a9677980ebe26ad0c20 | 6c6668d337dae3779e679447bb52648df13601f7 | /Miscellaneous/Selection Sort 2.py | a438700e81a5c09aec15d7d4507c0b70e417ccd6 | [] | no_license | Suryanshg/Python-2.7 | 74b812691d1cb453048fb99a915da29e04bc52a0 | 7f51439e21534d0db70d3bfdc3d7ec32346e12da | refs/heads/master | 2020-05-03T17:56:40.835588 | 2019-04-16T14:44:00 | 2019-04-16T14:44:00 | 178,753,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | while True:
def sort(L,c):
for i in range(len(L)):
ma=L[i]
for j in range(i+1,len(L)):
if (L[i]<L[j]):
ma=L[j]
L[j]=L[i]
L[i]=ma
if (c=='d'):
print L
else:
L.reverse()
print L
L=input("Enter a List:")
c=raw_input("Enter 'a' for ascending and 'd' for descending:")
sort(L,c)
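# Example trace (Python 2): [3, 1, 2] is rearranged in place to [3, 2, 1];
# 'd' prints it as-is (descending), any other choice reverses it to ascending [1, 2, 3].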
| [
"[email protected]"
] | |
198267fe7897a7771e9569fa5b7fb5dc877d1e3e | 20174998be9c95bc810f47ad394d64680fbf9438 | /bert_train.py | 87a783c306e58cb6a5c26555cb37190382c06cf3 | [] | no_license | dheeraj7596/metaguide | 5370bd02edd42e622da8077275441a870f4ebdbd | 168d754fc9bbcbd5f04146d0c7c65e3b1e856f31 | refs/heads/master | 2020-12-07T14:40:55.804669 | 2020-10-04T17:38:27 | 2020-10-04T17:38:27 | 232,739,789 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,527 | py | from transformers import BertForSequenceClassification, BertTokenizer, AdamW, BertConfig, \
get_linear_schedule_with_warmup
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
import torch
import numpy as np
import time
import random
import datetime
def format_time(elapsed):
'''
Takes a time in seconds and returns a string hh:mm:ss
'''
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
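# e.g. flat_accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])) -> 1.0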
def bert_tokenize(tokenizer, sentences, labels):
input_ids = []
attention_masks = []
# For every sentence...
for sent in sentences:
# `encode_plus` will:
# (1) Tokenize the sentence.
# (2) Prepend the `[CLS]` token to the start.
# (3) Append the `[SEP]` token to the end.
# (4) Map tokens to their IDs.
# (5) Pad or truncate the sentence to `max_length`
# (6) Create attention masks for [PAD] tokens.
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens=True, # Add '[CLS]' and '[SEP]'
max_length=512, # Pad & truncate all sentences.
pad_to_max_length=True,
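            # (pad_to_max_length is the legacy argument; newer transformers use padding='max_length')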
return_attention_mask=True, # Construct attn. masks.
return_tensors='pt', # Return pytorch tensors.
)
# Add the encoded sentence to the list.
input_ids.append(encoded_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
# Print sentence 0, now as a list of IDs.
print('Original: ', sentences[0])
print('Token IDs:', input_ids[0])
return input_ids, attention_masks, labels
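# Typical downstream use (sketch): the three tensors returned above are what TensorDataset expects,
#   dataset = TensorDataset(input_ids, attention_masks, labels)
# which can then be fed to create_data_loaders() defined below.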
def create_data_loaders(dataset):
# Calculate the number of samples to include in each set.
train_size = int(0.9 * len(dataset))
val_size = len(dataset) - train_size
# Divide the dataset by randomly selecting samples.
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
# The DataLoader needs to know our batch size for training, so we specify it
# here. For fine-tuning BERT on a specific task, the authors recommend a batch
# size of 16 or 32.
batch_size = 32
# Create the DataLoaders for our training and validation sets.
# We'll take training samples in random order.
train_dataloader = DataLoader(
train_dataset, # The training samples.
sampler=RandomSampler(train_dataset), # Select batches randomly
batch_size=batch_size # Trains with this batch size.
)
# For validation the order doesn't matter, so we'll just read them sequentially.
validation_dataloader = DataLoader(
val_dataset, # The validation samples.
sampler=SequentialSampler(val_dataset), # Pull out batches sequentially.
batch_size=batch_size # Evaluate with this batch size.
)
return train_dataloader, validation_dataloader
def train(train_dataloader, validation_dataloader, device, num_labels):
# Load BertForSequenceClassification, the pretrained BERT model with a single
# linear classification layer on top.
model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased", # Use the 12-layer BERT model, with an uncased vocab.
num_labels=num_labels, # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions=False, # Whether the model returns attentions weights.
output_hidden_states=False, # Whether the model returns all hidden-states.
)
if device == torch.device("cuda"):
model.cuda()
# Note: AdamW is a class from the huggingface library (as opposed to pytorch)
# I believe the 'W' stands for 'Weight Decay fix"
optimizer = AdamW(model.parameters(),
lr=2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps=1e-8 # args.adam_epsilon - default is 1e-8.
)
# Number of training epochs. The BERT authors recommend between 2 and 4.
# We chose to run for 4, but we'll see later that this may be over-fitting the
# training data.
epochs = 4
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=0, # Default value in run_glue.py
num_training_steps=total_steps)
# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
# Set the seed value all over the place to make this reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
if device == torch.device("cuda"):
torch.cuda.manual_seed_all(seed_val)
# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []
# Measure the total training time for the whole run.
total_t0 = time.time()
# For each epoch...
for epoch_i in range(0, epochs):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
# Measure how long the training epoch takes.
t0 = time.time()
# Reset the total loss for this epoch.
total_train_loss = 0
# Put the model into training mode. Don't be mislead--the call to
# `train` just changes the *mode*, it doesn't *perform* the training.
# `dropout` and `batchnorm` layers behave differently during training
# vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
# Progress update every 40 batches.
if step % 40 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
# Report progress.
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using the
# `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
# Always clear any previously calculated gradients before performing a
# backward pass. PyTorch doesn't do this automatically because
# accumulating the gradients is "convenient while training RNNs".
# (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
model.zero_grad()
# Perform a forward pass (evaluate the model on this training batch).
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# It returns different numbers of parameters depending on what arguments
            # are given and what flags are set. For our usage here, it returns
# the loss (because we provided labels) and the "logits"--the model
# outputs prior to activation.
loss, logits = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
total_train_loss += loss.item()
# Perform a backward pass to calculate the gradients.
loss.backward()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient.
# The optimizer dictates the "update rule"--how the parameters are
# modified based on their gradients, the learning rate, etc.
optimizer.step()
# Update the learning rate.
scheduler.step()
# Calculate the average loss over all of the batches.
avg_train_loss = total_train_loss / len(train_dataloader)
# Measure how long this epoch took.
training_time = format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(training_time))
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
model.eval()
# Tracking variables
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
# Evaluate data for one epoch
for batch in validation_dataloader:
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using
# the `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
# Tell pytorch not to bother with constructing the compute graph during
# the forward pass, since this is only needed for backprop (training).
with torch.no_grad():
# Forward pass, calculate logit predictions.
# token_type_ids is the same as the "segment ids", which
# differentiates sentence 1 and 2 in 2-sentence tasks.
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# Get the "logits" output by the model. The "logits" are the output
# values prior to applying an activation function like the softmax.
(loss, logits) = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
# Accumulate the validation loss.
total_eval_loss += loss.item()
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences, and
# accumulate it over all batches.
total_eval_accuracy += flat_accuracy(logits, label_ids)
# Report the final accuracy for this validation run.
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
# Calculate the average loss over all of the batches.
avg_val_loss = total_eval_loss / len(validation_dataloader)
# Measure how long the validation run took.
validation_time = format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
# Record all statistics from this epoch.
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid. Loss': avg_val_loss,
'Valid. Accur.': avg_val_accuracy,
'Training Time': training_time,
'Validation Time': validation_time
}
)
print("")
print("Training complete!")
print("Total training took {:} (h:mm:ss)".format(format_time(time.time() - total_t0)))
return model
def evaluate(model, prediction_dataloader, device):
# Put model in evaluation mode
model.eval()
# Tracking variables
predictions, true_labels = [], []
# Predict
for batch in prediction_dataloader:
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients, saving memory and
# speeding up prediction
with torch.no_grad():
# Forward pass, calculate logit predictions
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask)
logits = outputs[0]
# Move logits and labels to CPU
logits = torch.softmax(logits, dim=-1).detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Store predictions and true labels
predictions.append(logits)
true_labels.append(label_ids)
return predictions, true_labels
def test(model, X_test, y_test, use_gpu=False):
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
input_ids, attention_masks, labels = bert_tokenize(tokenizer, X_test, y_test)
if use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
batch_size = 32
# Create the DataLoader.
prediction_data = TensorDataset(input_ids, attention_masks, labels)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)
predictions, true_labels = evaluate(model, prediction_dataloader, device)
return predictions
def train_bert(X, y, use_gpu=False):
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
input_ids, attention_masks, labels = bert_tokenize(tokenizer, X, y)
# Combine the training inputs into a TensorDataset.
dataset = TensorDataset(input_ids, attention_masks, labels)
# Create a 90-10 train-validation split.
train_dataloader, validation_dataloader = create_data_loaders(dataset)
# Tell pytorch to run this model on the GPU.
if use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
model = train(train_dataloader, validation_dataloader, device, num_labels=len(set(y)))
return model
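# --- Illustrative usage sketch (not part of the original script) ---
# The snippet below shows one plausible way to wire the helpers above together:
# fine-tune with train_bert(), score a held-out split with test(), then turn the
# per-batch softmax outputs into hard label predictions. The CSV path and the
# "text"/"label" column names are made-up placeholders, and it assumes
# bert_tokenize(), flat_accuracy() and format_time() are defined earlier in this file.
def example_run(csv_path="reviews.csv", use_gpu=False):
    import numpy as np
    import pandas as pd

    df = pd.read_csv(csv_path)
    texts = df["text"].tolist()    # hypothetical column name
    labels = df["label"].tolist()  # hypothetical column name, integer class ids

    # Simple 90/10 split; train_bert() does its own train/validation split internally.
    split = int(0.9 * len(texts))
    model = train_bert(texts[:split], labels[:split], use_gpu=use_gpu)

    # test() returns one softmax array per batch; concatenate and take the argmax.
    probs = test(model, texts[split:], labels[split:], use_gpu=use_gpu)
    preds = np.argmax(np.concatenate(probs, axis=0), axis=1)
    accuracy = (preds == np.array(labels[split:])).mean()
    return accuracy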
| [
"[email protected]"
] | |
35c684815ba14b3ee735f70e4964fb9e0e979cf6 | 0f81cbd7cc67e1a51a3ce57d13da4d5477c496d4 | /pythons/sklearn/learn_SVM.py | 20297950c2cf8a37214ec674b12570c75de6e8ac | [] | no_license | Nisoka/somethings | 16b7dd897e4ed6a203a0e355be09ba432b6f396b | b72475deb074b093b88d31abfcc23f8dfea911f6 | refs/heads/master | 2020-04-03T15:54:48.020651 | 2019-08-22T02:09:29 | 2019-08-22T02:09:29 | 155,382,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,027 | py | # from sklearn import svm
#
# def trainSVM(train_xs, train_ys, decision='ovr'):
# clf = svm.SVC(decision_function_shape='ovr', kernel='poly', probability=True, C=11, coef0=11)
# clf.fit(train_xs, train_ys)
# return clf
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
    if hasattr(clf, "coef_"):
        # linear models expose coef_/intercept_, so the separating hyperplane can be drawn directly
        w = clf.coef_[0]
        a = -w[0] / w[1]
        x1 = np.linspace(xx.min(), xx.max())
        y1 = a * x1 - (clf.intercept_[0]) / w[1]
        ax.plot(x1, y1, color='navy')
    else:
        # non-linear kernels (e.g. poly) have no coef_, so fall back to a filled decision-region plot
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, **params)
    return ax
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
print(X)
print(y)
train_y = np.array([_y for _y in y if _y < 2])
train_x = np.array([X[i] for i in range(len(train_y))])
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
svm.LinearSVC(C=C),
# svm.SVC(kernel='rbf', gamma=0.7, C=C),
svm.SVC(kernel='poly', degree=3, C=C)
)
models = (clf.fit(train_x, train_y) for clf in models)
# title for the plots
titles = ('SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = train_x[:, 0], train_x[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=train_y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('Sepal length')
ax.set_ylabel('Sepal width')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show() | [
"[email protected]"
] | |
87526ab4de5e25639132b53199bbe9eba15b373b | 85e078ee3ceda5091624233ca19ba42f78747499 | /LeetCode/smallest_subtree_with_all_the_deepest_nodes.py | 879c6a1c4c2fb0ab8bb56f936b3cb6a0ef4572aa | [] | no_license | papayetoo/StudyinPython | d5e6ec0cff0e97fcc4afc8d846e3658c06eb67c2 | f686b6e08720ad4d7d57b41d24c63c4bfa64dd90 | refs/heads/master | 2021-07-22T04:05:38.993123 | 2021-02-03T14:12:26 | 2021-02-03T14:12:26 | 240,009,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # Definition for a binary tree node.
from collections import deque
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution(object):
def subtreeWithAllDeepest(self, root):
# Tag each node with it's depth.
depth = {None: -1}
def dfs(node, parent = None):
if node:
depth[node] = depth[parent] + 1
dfs(node.left, node)
dfs(node.right, node)
dfs(root)
max_depth = max(depth.values())
def answer(node):
# Return the answer for the subtree at node.
            if not node or depth.get(node, None) == max_depth:
                return node
L, R = answer(node.left), answer(node.right)
return node if L and R else L or R
return answer(root)
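# --- Example usage (sketch, added for illustration) ---
# Tree from the classic LeetCode 865 example: the deepest nodes are 7 and 4,
# so the smallest subtree containing all of them is the one rooted at 2.
# Kept under a __main__ guard so importing this file stays side-effect free.
if __name__ == "__main__":
    example = TreeNode(3,
                       TreeNode(5, TreeNode(6), TreeNode(2, TreeNode(7), TreeNode(4))),
                       TreeNode(1, TreeNode(0), TreeNode(8)))
    print(Solution().subtreeWithAllDeepest(example).val)  # expected: 2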
| [
"[email protected]"
] | |
538d071e2c58a84c9e37f5d44356d7df7b7c46b2 | 3c645f28d89820abfc9fdb82cbe54f034850f6c1 | /exeteraeval/import_patients_dask.py | de849f608110d1bb3028d95e7ece03999c1df1dc | [
"Apache-2.0"
] | permissive | KCL-BMEIS/ExeTeraEval | 8cc861ad81877df9e10fb54c9cea9c28e1c1e759 | c6ef1485aff08fba17cd328a76fcc398c757255d | refs/heads/main | 2023-06-03T22:14:16.671092 | 2021-06-23T08:41:30 | 2021-06-23T08:41:30 | 316,761,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,861 | py | import sys
from collections import OrderedDict
import json
import time
import pandas as pd
import dask
import dask.dataframe as ddf
def dtype_from_schema(entry):
ft = entry['field_type']
if ft == 'categorical':
if 'out_of_range' in entry['categorical']:
return 'object'
else:
# return 'category'
return 'object'
elif ft == 'numeric':
if entry['value_type'] in ('uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'bool'):
return 'float32'
else:
return 'float64'
else:
return 'object'
def go(schema_filename, table, src_filename, dest_filename):
with open(schema_filename, 'r') as f:
schema = json.load(f)
schema = schema['schema'][table]['fields']
column_dtypes = OrderedDict()
for fk, fv in schema.items():
column_dtypes[fk] = dtype_from_schema(fv)
for fk, fv in column_dtypes.items():
print(fk, fv)
t0 = time.time()
df = ddf.read_csv(src_filename, dtype=column_dtypes)
# df.compute()
# print(df)
# print('to_hdf')
df.to_hdf(dest_filename, '/data', lock=dask.utils.SerializableLock())
print(time.time() - t0)
# t0 = time.time()
# print('read csv')
# df = pd.read_csv(src_filename) #, dtype=column_dtypes)
# print('from pandas')
# df = ddf.from_pandas(df, npartitions=1)
# print('to hdf')
# df.to_hdf(dest_filename, '/data', format='table')
# print(time.time() - t0)
t0 = time.time()
df2 = ddf.read_hdf(dest_filename, '/data')
print(df2.compute())
print(time.time() - t0)
if __name__ == '__main__':
if len(sys.argv) != 5:
print("Usage: import_patients_pandas.py "
"<schema_filename> <table> <source filename> <destination filename>")
exit(-1)
t0 = time.time()
try:
go(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
except Exception as e:
print("failed after", time.time() - t0)
raise
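# Example invocation (sketch; the file names and table name are placeholders,
# but the argument order matches the usage message above):
#   python import_patients_dask.py schema.json patients patients.csv patients.h5
# i.e. the schema JSON, the table key inside it, the source CSV and the HDF5 destination.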
| [
"[email protected]"
] | |
015a6f53a7b2208d9135e32d9d7b1f24ef8272a4 | ca07351db12c08244bd16ed4ea87f20a929a4870 | /blog/models.py | 88bc5847d751b90cfadece8a3ac7f5c79d0f0a5e | [] | no_license | kassa-diss/dspredictor-master | a90a2533a4c9b65e3989c1a0019d2f669210b601 | 0e91842c14eec07c6296686abc5e9197cbdbb941 | refs/heads/main | 2023-06-27T13:47:13.662682 | 2021-08-01T12:24:51 | 2021-08-01T12:24:51 | 355,596,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | from django.db import models
from django.utils import timezone
from django.urls import reverse
from django.conf import settings
class PostManager(models.Manager):
def like_toggle(self, user, post_obj):
if user in post_obj.liked.all():
is_liked = False
post_obj.liked.remove(user)
else:
is_liked = True
post_obj.liked.add(user)
return is_liked
class Post(models.Model):
author = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
brief = models.TextField(max_length=500,default="")
content = models.TextField()
liked = models.ManyToManyField(
settings.AUTH_USER_MODEL, blank=True, related_name='liked')
date_posted = models.DateTimeField(default=timezone.now)
pic = models.ImageField(upload_to='photos/%Y/%m/%d/', default='photos\2021\04\06\01.png')
objects = PostManager()
class Meta:
ordering = ('-date_posted', )
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_detail', kwargs={'pk': self.pk})
class Comment(models.Model):
post = models.ForeignKey(
Post, related_name='comments', on_delete=models.CASCADE)
author = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
approved_comment = models.BooleanField(default=True)
def approve(self):
self.approved_comment = True
self.save()
def get_absolute_url(self):
return reverse("post_list")
def __str__(self):
return self.author
# Create your models here.
| [
"[email protected]"
] | |
0160e702928416c0796466a1e2b85f9c8dcd39e8 | 80297c81618831adc72714ca1fa2f172b375b8ea | /pandas/io/parsers/c_parser_wrapper.py | e8909f542f335680cbd31f48c992ee3d3ac2ba32 | [
"BSD-3-Clause"
] | permissive | abmyii/pandas | 61f852957c54e3517f9ff899f79cfefa4fffbc57 | 5bf346c7e26ae74c1e02c433cdf370de4adb7ac1 | refs/heads/master | 2022-08-26T11:13:51.846587 | 2022-02-12T18:33:18 | 2022-02-12T18:33:18 | 228,681,300 | 0 | 0 | BSD-3-Clause | 2019-12-17T18:50:37 | 2019-12-17T18:50:36 | null | UTF-8 | Python | false | false | 14,933 | py | from __future__ import annotations
from typing import (
Hashable,
Mapping,
Sequence,
)
import warnings
import numpy as np
import pandas._libs.parsers as parsers
from pandas._typing import (
ArrayLike,
DtypeArg,
DtypeObj,
ReadCsvBuffer,
)
from pandas.errors import DtypeWarning
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_categorical_dtype,
pandas_dtype,
)
from pandas.core.dtypes.concat import union_categoricals
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas import (
Index,
MultiIndex,
)
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.io.parsers.base_parser import (
ParserBase,
is_index_col,
)
class CParserWrapper(ParserBase):
low_memory: bool
_reader: parsers.TextReader
def __init__(self, src: ReadCsvBuffer[str], **kwds):
super().__init__(kwds)
self.kwds = kwds
kwds = kwds.copy()
self.low_memory = kwds.pop("low_memory", False)
# #2442
# error: Cannot determine type of 'index_col'
kwds["allow_leading_cols"] = (
self.index_col is not False # type: ignore[has-type]
)
# GH20529, validate usecol arg before TextReader
kwds["usecols"] = self.usecols
# Have to pass int, would break tests using TextReader directly otherwise :(
kwds["on_bad_lines"] = self.on_bad_lines.value
for key in (
"storage_options",
"encoding",
"memory_map",
"compression",
"error_bad_lines",
"warn_bad_lines",
):
kwds.pop(key, None)
kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
self._reader = parsers.TextReader(src, **kwds)
self.unnamed_cols = self._reader.unnamed_cols
# error: Cannot determine type of 'names'
passed_names = self.names is None # type: ignore[has-type]
if self._reader.header is None:
self.names = None
else:
# error: Cannot determine type of 'names'
# error: Cannot determine type of 'index_names'
(
self.names, # type: ignore[has-type]
self.index_names,
self.col_names,
passed_names,
) = self._extract_multi_indexer_columns(
self._reader.header,
self.index_names, # type: ignore[has-type]
passed_names,
)
# error: Cannot determine type of 'names'
if self.names is None: # type: ignore[has-type]
if self.prefix:
# error: Cannot determine type of 'names'
self.names = [ # type: ignore[has-type]
f"{self.prefix}{i}" for i in range(self._reader.table_width)
]
else:
# error: Cannot determine type of 'names'
self.names = list( # type: ignore[has-type]
range(self._reader.table_width)
)
# gh-9755
#
# need to set orig_names here first
# so that proper indexing can be done
# with _set_noconvert_columns
#
# once names has been filtered, we will
# then set orig_names again to names
# error: Cannot determine type of 'names'
self.orig_names = self.names[:] # type: ignore[has-type]
if self.usecols:
usecols = self._evaluate_usecols(self.usecols, self.orig_names)
# GH 14671
# assert for mypy, orig_names is List or None, None would error in issubset
assert self.orig_names is not None
if self.usecols_dtype == "string" and not set(usecols).issubset(
self.orig_names
):
self._validate_usecols_names(usecols, self.orig_names)
# error: Cannot determine type of 'names'
if len(self.names) > len(usecols): # type: ignore[has-type]
# error: Cannot determine type of 'names'
self.names = [ # type: ignore[has-type]
n
# error: Cannot determine type of 'names'
for i, n in enumerate(self.names) # type: ignore[has-type]
if (i in usecols or n in usecols)
]
# error: Cannot determine type of 'names'
if len(self.names) < len(usecols): # type: ignore[has-type]
# error: Cannot determine type of 'names'
self._validate_usecols_names(
usecols,
self.names, # type: ignore[has-type]
)
# error: Cannot determine type of 'names'
self._validate_parse_dates_presence(self.names) # type: ignore[has-type]
self._set_noconvert_columns()
# error: Cannot determine type of 'names'
self.orig_names = self.names # type: ignore[has-type]
if not self._has_complex_date_col:
# error: Cannot determine type of 'index_col'
if self._reader.leading_cols == 0 and is_index_col(
self.index_col # type: ignore[has-type]
):
self._name_processed = True
(
index_names,
# error: Cannot determine type of 'names'
self.names, # type: ignore[has-type]
self.index_col,
) = self._clean_index_names(
# error: Cannot determine type of 'names'
self.names, # type: ignore[has-type]
# error: Cannot determine type of 'index_col'
self.index_col, # type: ignore[has-type]
)
if self.index_names is None:
self.index_names = index_names
if self._reader.header is None and not passed_names:
assert self.index_names is not None
self.index_names = [None] * len(self.index_names)
self._implicit_index = self._reader.leading_cols > 0
def close(self) -> None:
# close handles opened by C parser
try:
self._reader.close()
except ValueError:
pass
def _set_noconvert_columns(self) -> None:
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions.
"""
assert self.orig_names is not None
# error: Cannot determine type of 'names'
# much faster than using orig_names.index(x) xref GH#44106
names_dict = {x: i for i, x in enumerate(self.orig_names)}
col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type]
# error: Cannot determine type of 'names'
noconvert_columns = self._set_noconvert_dtype_columns(
col_indices,
self.names, # type: ignore[has-type]
)
for col in noconvert_columns:
self._reader.set_noconvert(col)
def read(
self,
nrows: int | None = None,
) -> tuple[
Index | MultiIndex | None,
Sequence[Hashable] | MultiIndex,
Mapping[Hashable, ArrayLike],
]:
index: Index | MultiIndex | None
column_names: Sequence[Hashable] | MultiIndex
try:
if self.low_memory:
chunks = self._reader.read_low_memory(nrows)
# destructive to chunks
data = _concatenate_chunks(chunks)
else:
data = self._reader.read(nrows)
except StopIteration:
if self._first_chunk:
self._first_chunk = False
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = self._get_empty_meta(
names,
self.index_col,
self.index_names,
dtype=self.kwds.get("dtype"),
)
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
if self.usecols is not None:
columns = self._filter_usecols(columns)
col_dict = {k: v for k, v in col_dict.items() if k in columns}
return index, columns, col_dict
else:
self.close()
raise
# Done with first read, next time raise StopIteration
self._first_chunk = False
# error: Cannot determine type of 'names'
names = self.names # type: ignore[has-type]
if self._reader.leading_cols:
if self._has_complex_date_col:
raise NotImplementedError("file structure not yet supported")
# implicit index, no index names
arrays = []
for i in range(self._reader.leading_cols):
if self.index_col is None:
values = data.pop(i)
else:
values = data.pop(self.index_col[i])
values = self._maybe_parse_dates(values, i, try_parse_dates=True)
arrays.append(values)
index = ensure_index_from_sequences(arrays)
if self.usecols is not None:
names = self._filter_usecols(names)
names = self._maybe_dedup_names(names)
# rename dict keys
data_tups = sorted(data.items())
data = {k: v for k, (i, v) in zip(names, data_tups)}
column_names, date_data = self._do_date_conversions(names, data)
# maybe create a mi on the columns
column_names = self._maybe_make_multi_index_columns(
column_names, self.col_names
)
else:
# rename dict keys
data_tups = sorted(data.items())
# ugh, mutation
# assert for mypy, orig_names is List or None, None would error in list(...)
assert self.orig_names is not None
names = list(self.orig_names)
names = self._maybe_dedup_names(names)
if self.usecols is not None:
names = self._filter_usecols(names)
# columns as list
alldata = [x[1] for x in data_tups]
if self.usecols is None:
self._check_data_length(names, alldata)
data = {k: v for k, (i, v) in zip(names, data_tups)}
names, date_data = self._do_date_conversions(names, data)
index, column_names = self._make_index(date_data, alldata, names)
return index, column_names, date_data
def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]:
# hackish
usecols = self._evaluate_usecols(self.usecols, names)
if usecols is not None and len(names) != len(usecols):
names = [
name for i, name in enumerate(names) if i in usecols or name in usecols
]
return names
def _get_index_names(self):
names = list(self._reader.header[0])
idx_names = None
if self._reader.leading_cols == 0 and self.index_col is not None:
(idx_names, names, self.index_col) = self._clean_index_names(
names, self.index_col
)
return names, idx_names
def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True):
if try_parse_dates and self._should_parse_dates(index):
values = self._date_conv(values)
return values
def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
"""
Concatenate chunks of data read with low_memory=True.
The tricky part is handling Categoricals, where different chunks
may have different inferred categories.
"""
names = list(chunks[0].keys())
warning_columns = []
result = {}
for name in names:
arrs = [chunk.pop(name) for chunk in chunks]
# Check each arr for consistent types.
dtypes = {a.dtype for a in arrs}
# TODO: shouldn't we exclude all EA dtypes here?
numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
if len(numpy_dtypes) > 1:
# error: Argument 1 to "find_common_type" has incompatible type
# "Set[Any]"; expected "Sequence[Union[dtype[Any], None, type,
# _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]]"
common_type = np.find_common_type(
numpy_dtypes, # type: ignore[arg-type]
[],
)
if common_type == object:
warning_columns.append(str(name))
dtype = dtypes.pop()
if is_categorical_dtype(dtype):
result[name] = union_categoricals(arrs, sort_categories=False)
else:
if isinstance(dtype, ExtensionDtype):
# TODO: concat_compat?
array_type = dtype.construct_array_type()
# error: Argument 1 to "_concat_same_type" of "ExtensionArray"
# has incompatible type "List[Union[ExtensionArray, ndarray]]";
# expected "Sequence[ExtensionArray]"
result[name] = array_type._concat_same_type(
arrs # type: ignore[arg-type]
)
else:
# Argument 1 to "concatenate" has incompatible type
# "List[Union[ExtensionArray, ndarray[Any, Any]]]"; expected
# "Union[_SupportsArray[dtype[Any]],
# Sequence[_SupportsArray[dtype[Any]]],
# Sequence[Sequence[_SupportsArray[dtype[Any]]]],
# Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]],
# Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]"
result[name] = np.concatenate(arrs) # type: ignore[arg-type]
if warning_columns:
warning_names = ",".join(warning_columns)
warning_message = " ".join(
[
f"Columns ({warning_names}) have mixed types. "
f"Specify dtype option on import or set low_memory=False."
]
)
warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())
return result
def ensure_dtype_objs(
dtype: DtypeArg | dict[Hashable, DtypeArg] | None
) -> DtypeObj | dict[Hashable, DtypeObj] | None:
"""
Ensure we have either None, a dtype object, or a dictionary mapping to
dtype objects.
"""
if isinstance(dtype, dict):
return {k: pandas_dtype(dtype[k]) for k in dtype}
elif dtype is not None:
return pandas_dtype(dtype)
return dtype
| [
"[email protected]"
] | |
160b2a9bafde75264ce75c06f5673e2489becdf4 | 30e568f5feccea6c87a284c47d65ade652fe9f61 | /blog/views.py | b6d8713a243724b44739a8a3bbb5d42b43cf3267 | [] | no_license | lukaszszajkowski/anitablog | c923ec367f76a2e26feb1145cd330628acaf4076 | d108379d4266a5e1a4cbbab0c316341f623ebf80 | refs/heads/master | 2021-01-01T05:39:10.021870 | 2014-08-19T12:55:12 | 2014-08-19T12:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,404 | py | from django.shortcuts import render
# Create your views here.
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import redirect, get_object_or_404
from django.core.urlresolvers import reverse
from django.shortcuts import render
from .models import Post, Comment
from .forms import PostForm, CommentForm
def home(request):
return render(request, 'index.html', {'test': "test"})
def list_posts(request):
form = CommentForm(request.POST or None)
posts = Post.objects.all()
comments = Comment.objects.all()
return render(request, 'posts.html', {'posts': posts, 'form': form, 'comments': comments})
@user_passes_test(lambda u: u.is_superuser)
def add_post(request):
form = PostForm(request.POST or None)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect(post)
return render(request, 'add_post.html', {'form': form})
def view_post(request, slug):
post = get_object_or_404(Post, slug=slug)
form = CommentForm(request.POST or None)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
request.session["name"] = comment.name
request.session["email"] = comment.email
return redirect(request.path)
form.initial['name'] = request.session.get('name')
form.initial['email'] = request.session.get('email')
return render(request, 'posts.html', {'post': post, 'form': form, })
def detail_post(request, slug):
post = Post.objects.get(slug=slug)
context = {
'post': post,
}
return render(request, 'detail_post.html', context)
def add_comment(request, slug):
post = get_object_or_404(Post, slug=slug)
form = CommentForm(request.POST or None)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
request.session["name"] = comment.name
request.session["email"] = comment.email
return redirect(reverse('list_posts'))
form.initial['name'] = request.session.get('name')
form.initial['email'] = request.session.get('email')
return render(request, 'add_comment.html', {
'post': post,
'form': form,
}) | [
"[email protected]"
] | |
ac9274e51003d35b9861619abcc7751dda7e1aaa | e6a0bb9235cf36697fbf41084e4506e785d530ac | /git/Alice-test/alice-redis-backup-op.py | 9d939ed23f2bb3f9966ea26a197d46ddd415bc7d | [] | no_license | githkm/playbooks | f1f185ab7185bb32906b3e75618960af5f060714 | e00ebeff2654eb2d0b6eb01a6c667dc957e3f414 | refs/heads/master | 2020-07-24T06:47:26.515251 | 2019-10-30T03:42:18 | 2019-10-30T03:42:18 | 207,834,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,780 | py | #!/root/.pyenv/shims/python
#coding: utf-8
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.redis.v20180412 import redis_client, models
from datetime import datetime, timedelta, timezone
import getopt, sys, json, wget
utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
cn_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
current_bj_time = cn_dt.strftime("%Y-%m-%d %H:%M")
current_bj_date = cn_dt.strftime("%Y-%m-%d")
specifies_bj_time = "%s 04:" % current_bj_date
cred = credential.Credential("AKIDgAPSJzIcaNuCH7J4A4mERRBgw0pVEEI9", "lGYaQ0KRPBtKizDJjuqY1FHDzby75oIX")
httpProfile = HttpProfile()
httpProfile.endpoint = "redis.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = redis_client.RedisClient(cred, "ap-tokyo", clientProfile)
BackupId = None
def getbackuplist(InstanceId):
try:
req = models.DescribeInstanceBackupsRequest()
params = '{"InstanceId":"%s"}' % InstanceId
# print("params",params)
req.from_json_string(params)
resp = client.DescribeInstanceBackups(req)
# print(resp.to_json_string())
return resp.to_json_string()
except TencentCloudSDKException as err:
print(err)
def manualbackup(InstanceId, current_bj_time):
try:
req = models.ManualBackupInstanceRequest()
params = '{"InstanceId":"%s", "Remark": "%s"}' % (InstanceId, current_bj_time)
req.from_json_string(params)
resp = client.ManualBackupInstance(req)
print(resp.to_json_string())
return resp.to_json_string()
except TencentCloudSDKException as err:
print(err)
def geturl(InstanceId, BackupId):
try:
req = models.DescribeBackupUrlRequest()
params = '{"InstanceId":"%s","BackupId":"%s"}' % (InstanceId, BackupId)
req.from_json_string(params)
resp = client.DescribeBackupUrl(req)
return resp.to_json_string()
except TencentCloudSDKException as err:
print(err)
def usage():
print("Instances ID List:", instance_ids)
print('''usage: alice-redis-backup-op.py [options] arg1 ...'
options:
-h, Show This Help Message And Exit
--create At Current Time Create Backup
--list List All Backup
--autodown Auto Download The Crontab Backup At 04:00 Everyday
--download InstanceID BackupID Download Specifies Instance And Specifies Backup
''')
if __name__ == "__main__":
options = None
args = None
tmp_dic = {}
#instance_ids = {"172.16.6.3": "crs-c3hjmvm8", "172.16.6.12": "crs-qjdj8zv8"}
instance_ids = {"172.16.6.3": "crs-c3hjmvm8", "172.16.6.20": "crs-qzf91dlm", "172.16.6.12": "crs-qjdj8zv8", "172.16.6.44": "crs-n0u1xupg"}
if len(sys.argv) == 1:
usage()
exit(1)
try:
options, args = getopt.getopt(sys.argv[1:], "h", ['create', 'list', 'download', 'autodown'])
except Exception as e:
print(str(e))
exit(1)
for name, value in options:
if name == '--list':
for k in instance_ids:
res = getbackuplist(instance_ids[k])
tmp_dic.update({k: json.loads(res)['BackupSet']})
for k, v in tmp_dic.items():
print(k, instance_ids[k], v[0])
elif name == '--autodown':
for k in instance_ids:
res = getbackuplist(instance_ids[k])
for each in json.loads(res)['BackupSet']:
if specifies_bj_time in each['StartTime']:
url = geturl(instance_ids[k], each['BackupId'])
tmp_dic.update({k: json.loads(url)['InnerDownloadUrl'][0]})
print(tmp_dic)
for k in tmp_dic:
wget.download(tmp_dic[k],"/home/www/%s-dump.rdb" % k)
elif name == '--create':
for k in instance_ids:
res = manualbackup(instance_ids[k], current_bj_time)
elif name == '--download':
if len(args) != 2:
usage()
exit(255)
url = json.loads(geturl(args[0], args[1]))['InnerDownloadUrl'][0]
print('URL:',url)
for k in instance_ids:
if instance_ids[k] == args[0]:
wget.download(url,k)
elif name == "-h":
usage()
exit(0)
| [
"[email protected]"
] | |
af670c7de09800202230e69c4c1e374c784f1b0a | edc4a15788b8c1d11ea0496a588e63f9e7ae2712 | /csv_to_blenderspheres.py | d248abd159e686db522157912b2566c097c3e3d9 | [] | no_license | BradhamLab/Blender | e716f322f54cfb922cfcf3aa707a665ba3762b30 | a3bf8234857b9788a0823715763c6561a5776be0 | refs/heads/master | 2020-03-25T19:07:22.761071 | 2018-08-08T20:55:18 | 2018-08-08T20:55:18 | 144,065,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,198 | py | import sys
def generate_blender_line(coordinates, blenderLayer):
# Write a line to generate a sphere object in Blender.
varLayer = ["False","False","False","False","False","False","False","False","False","False","False","False","False","False","False","False","False","False","False","False"]
varLayer[blenderLayer] = "True"
layerStr = ','.join(str(layer) for layer in varLayer)
return "bpy.ops.mesh.primitive_uv_sphere_add(segments=32, ring_count=16, size=0.5, view_align=False, enter_editmode=False, location=(" + coordinates + "), rotation=(0,0,0), layers=(" + layerStr + "))"
def write_blender_script(outputFile):
#Iterates through list of xyz coordinates, then writes each line of Blender code to an output file
coordLayer = 0 #designates the desired Blender layer to add sphere objects to. Layer 1 in Blender = coordLayer 0
with open(outputFile, 'w') as out:
for xyz in coordList:
            if xyz in (',,', ''):  # increase the Blender layer for coordinates following a blank line in the csv file
coordLayer +=1
else:
line = generate_blender_line(xyz, coordLayer)
out.write(line + '\n')
def usage():
str="This script generates a text file containing a series of blender inputs, given a .csv file containing xyz coordinates\
\n\n\nThe script takes two files as input, an input file name and an output \
file name. The input file is expected to be a .csv file, while the output file\
is expected to be .txt file. To run the script issue the following command:\n\n\n\
\tpython csv_to_blenderspheres.py <input_file> <output_file>\n\n\n\
For additional information consult the ReadMe at http://www.github.com/BradhamLab/Blender\n\n\n"
print(str)
args = sys.argv[1:]
if len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], str):
if args[0].endswith('.csv') and args[1].endswith('.txt'):
        with open(args[0]) as f:
            # read every row; treat '\r' and '\n' line endings the same way
            coordList = f.read().replace('\r', '\n').splitlines()
write_blender_script(args[1])
else:
usage()
else:
usage()
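# Example input (sketch; file names are placeholders): each row of the .csv is an
# "x,y,z" coordinate, and a blank row (seen by the script as ',,' or '') moves the
# following spheres onto the next Blender layer. For instance, a file containing
#
#   0,0,0
#   1,0,0
#   ,,
#   0,1,2
#
# produces three bpy.ops.mesh.primitive_uv_sphere_add(...) lines, the last one on layer 2.
# Invocation: python csv_to_blenderspheres.py spheres.csv spheres.txt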
| [
"[email protected]"
] | |
dc014c62864afc9910148aede58f63bbc87f6677 | f0b741f24ccf8bfe9bd1950425d83b6291d21b10 | /sdk/python/test_data/pipelines/pipeline_with_retry.py | 3a2d8a506bc7d2e1e9d3cd667fccdf4249d58f5e | [
"Apache-2.0"
] | permissive | kubeflow/pipelines | e678342b8a325559dec0a6e1e484c525fdcc8ce8 | 3fb199658f68e7debf4906d9ce32a9a307e39243 | refs/heads/master | 2023-09-04T11:54:56.449867 | 2023-09-01T19:07:33 | 2023-09-01T19:12:27 | 133,100,880 | 3,434 | 1,675 | Apache-2.0 | 2023-09-14T20:19:06 | 2018-05-12T00:31:47 | Python | UTF-8 | Python | false | false | 1,005 | py | # Copyright 2022 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import compiler
from kfp import dsl
@dsl.component
def add(a: float, b: float) -> float:
return a + b
@dsl.pipeline(name='test-pipeline')
def my_pipeline(a: float = 1, b: float = 7):
add_task = add(a=a, b=b)
add_task.set_retry(num_retries=3)
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
package_path=__file__.replace('.py', '.yaml'))
| [
"[email protected]"
] | |
18afb092fa3604ed28ba2383053af28dc9cd73d5 | 13d3954b5c26984b5f0d9eb071fff4d3b030c7da | /github/api.py | 5c3ff0934b1090cac9a1ce82d5bf71e8f62837c7 | [] | no_license | gsdenys/git-stat | e884bfcaa8244675ec52216da03b5c40a9760bd3 | 2fdfef5a068d7a5e49ab14fa8301530001850397 | refs/heads/main | 2023-03-17T13:18:07.656343 | 2021-03-10T17:43:25 | 2021-03-10T17:43:25 | 329,178,172 | 0 | 0 | null | 2021-02-08T21:26:34 | 2021-01-13T03:05:37 | Jupyter Notebook | UTF-8 | Python | false | false | 867 | py | import requests as req
# from config import Config
class Worker:
""" Worker class to helps implementation of github API actions """
def __init__(self, config):
"""Default Builder
Args:
            config (Config): The configuration object
"""
self._config = config
# Create the HTTP requeste header
self._headers = req.utils.default_headers()
self._headers.update({'Authorization': 'token ' + config.getToken()})
def get(self, url):
"""Function to perform a HTTP GET request through any URL.
Args:
url (str): The http URL
Returns:
JSON: The json-encoded content of a response, if any.
"""
data = req.get(url, headers=self._headers)
return data.json()
def getConf(self):
return self._config
| [
"[email protected]"
] | |
7a33b8340748fbc641c270e612ef69eba661c3b4 | e2bb4e21acf004c9f50fa1446390755dd966fa09 | /Tests/test_MathOperations.py | aab7a0183d15844ab6c97534564955f379184259 | [] | no_license | HGNJIT/statsCalculator | 45f933ab4fb9576b96a4668fbfaf994584caec03 | 4fc9ac753921e6ac784afe071b84fbabf31c925c | refs/heads/master | 2023-03-23T18:13:53.563727 | 2021-03-23T03:40:25 | 2021-03-23T03:40:25 | 343,956,928 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | import unittest
from Calculator.calculator import Calculator
class MyTestCase(unittest.TestCase):
def setUp(self):
self.calculator = Calculator()
def test_instantiate_calculator(self):
self.assertIsInstance(self.calculator, Calculator)
def test_calculator_return_sum(self):
result = self.calculator.Sum(1, 2)
self.assertEqual(3, result)
def test_calculator_return_sumList(self):
numlist = [1, 3, 5, 2]
result = self.calculator.Sum(numlist)
self.assertEqual(11, result)
def test_calculator_return_difference(self):
result = self.calculator.Difference(4, 1)
self.assertEqual(3, result)
def test_MathOperation_Product(self):
self.assertEqual(10, self.calculator.Product(2, 5))
def test_MathOperations_Product_list(self):
numlist = [1, 3, 5]
self.assertEqual(15, self.calculator.Product(numlist))
def test_MathOperations_Power(self):
self.assertEqual(8, self.calculator.Power(2, 3))
def test_MathOperations_Power_list(self):
numlist = [1, 2, 3]
self.assertEqual(9, self.calculator.Power(numlist, 2))
def test_Root(self):
self.assertEqual(3, self.calculator.Root(2,9)) | [
"[email protected]"
] | |
3024d9f2053d0621237ff0d4fa0459671876a57a | 5c09a68e01be442e7184e8a40b32305504ad3aae | /src/modules/helper/wscript | 3daf05b53462e344ada34db1cf2047789e9bd28c | [] | no_license | jakobluettgau/feign | 8178ff99d7fb3da6abc97f5a9715ccffed9ae040 | 0c735283923253a6cb51a28fe4d3299cc2a2a86f | refs/heads/master | 2016-09-10T14:59:51.370767 | 2015-03-06T12:18:01 | 2015-03-06T12:18:01 | 17,636,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | #! /usr/bin/env python
def options(ctx):
ctx.load("compiler_c compiler_cxx")
pass
def configure(ctx):
ctx.load("compiler_c compiler_cxx")
ctx.check_cfg(package='glib-2.0', args='--cflags --libs')
ctx.env.LINKFLAGS = []
#TODO: absolute path
#ctx.env.append_value('INCLUDES', ['../include'])
# configure modules and plugins
pass
def build(ctx):
# build feign executables
ctx.shlib(source=['helper.cpp'], target='feign-helper', uselib=["GLIB-2.0"])
pass
| [
"[email protected]"
] | ||
967c54ebd9e3e7a1f17010694bebc3026a024991 | 440c2f17a64b718227bbc9ac1f799630d0f3233d | /Chapter01_ArrayProblem/lc796.py | b50332bb95b4ae17e7495e3c834eeac162f62da0 | [] | no_license | HuichuanLI/alogritme-interview | 6fc84fdbfe1123c1e587eaf2df6b6e9fb2ca7dda | 0ac672a1582707fcaa6b6ad1f2a1d927034447df | refs/heads/master | 2023-02-05T03:29:14.458783 | 2020-12-25T14:04:17 | 2020-12-25T14:04:17 | 206,583,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | class Solution:
def rotateString(self, A: str, B: str) -> bool:
if len(A) != len(B):
return False
if A == B:
return True
for index, a in enumerate(A):
if a != B[0]:
continue
else:
if A[index:] + A[:index] == B:
return True
return False
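# An equivalent, commonly used shortcut, added here only as an illustrative
# alternative: every rotation of A is a substring of A + A, so the check reduces
# to a length comparison plus one substring test.
def rotate_string_concat(A: str, B: str) -> bool:
    return len(A) == len(B) and B in A + A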
| [
"[email protected]"
] | |
c1fcb8d442f27e99c3ca9c06c6168d39a909e9e7 | 467a4136897495766b7cbb69890b642ea684d50c | /project/applications/users/serializers.py | 732dceac7c6a149bf801fe707de6e88d917b906e | [] | no_license | abheist/django_angular_webpack | 1eb4e6d766e86dc9203edb4573a5563497d5eca9 | 0ec0b532f08eb5fa7094fb3f84b04b756ee6662a | refs/heads/master | 2021-06-19T22:49:38.154736 | 2017-07-21T21:33:37 | 2017-07-21T21:33:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from rest_framework import serializers
from project.applications.users.models import User
class UserSerializer(serializers.ModelSerializer):
class Meta(object):
model = User
fields = ('id', 'email', 'is_admin', 'is_active',)
| [
"[email protected]"
] | |
f76accf40d09b58d4311d4c39502aaf544b48f91 | 9da129ec93a6fd2c5f65b57a0faec21d8eb80720 | /Term_NSI/devoirs/4-dm2/Corrigé/S5/E10.py | a95413dc1d38cadd7439053c1c59d2d57ee9e86a | [] | no_license | FranckCHAMBON/ClasseVirtuelle | 79bd4e4114d27ca3792b46b1fb384c394397b329 | 48fe9883ee6763e79e561537bc0ed7048339b457 | refs/heads/master | 2022-08-22T15:35:48.128508 | 2021-04-28T07:21:04 | 2021-04-28T07:21:04 | 249,953,475 | 3 | 4 | null | 2022-08-05T09:28:10 | 2020-03-25T10:52:47 | HTML | UTF-8 | Python | false | false | 1,310 | py | """
Prologin: 2003 training round
Exercise: 10 - Solitaire
https://prologin.org/train/2003/semifinal/solitaire
"""
plateau = []
liste_billes = []
for ligne in range(7):
entrée = list(input())
plateau.append(entrée)
for colonne in range(7):
if entrée[colonne] == "1":
liste_billes.append((ligne, colonne))
def est_déplaçable(x, y):
"""
    Return the number of possible moves from (`x`,`y`)
"""
nombre_déplacements = 0
if 0 <= x+2 < 7 and 0 <= y < 7:
if plateau[x+1][y] == "1":
if plateau[x+2][y] == "0":
nombre_déplacements += 1
if 0 <= x < 7 and 0 <= y+2 < 7:
if plateau[x][y+1] == "1":
if plateau[x][y+2] == "0":
nombre_déplacements += 1
if 0 <= x-2 < 7 and 0 <= y < 7:
if plateau[x-1][y] == "1":
if plateau[x-2][y] == "0":
nombre_déplacements += 1
if 0 <= x < 7 and 0 <= y-2 < 7:
if plateau[x][y-1] == "1":
if plateau[x][y-2] == "0":
nombre_déplacements += 1
return nombre_déplacements
somme_déplacements = sum(est_déplaçable(x_bille, y_bille) for x_bille, y_bille in liste_billes)
print(somme_déplacements) | [
"[email protected]"
] | |
c62111437c9a53952b4d3ee19e7ba5ee43c9e633 | b41b996b4a14f11bb3d7676b4539725a93c2d586 | /SourceCode/Codesignal-Selenium/SourceNew/Py3/insertDashes.py3 | 7c90291c8490000fd0d77637d4d36bd37deccab2 | [] | no_license | mquandvr/selenium | ee2c0e7febb15f4db4d33a8575726f67c48bde05 | d7bb4c95d4d0756c66cbf3c69387318fc07219f2 | refs/heads/master | 2020-06-05T05:49:57.476867 | 2019-06-18T10:55:27 | 2019-06-18T10:55:27 | 192,335,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py3 | def insertDashes(inputString):
words = inputString.split()
for i in range(len(words)):
words[i] = "-".join(list(words[i]))
return ' '.join(words)
| [
"[email protected]"
] | |
f76c015d05834771b0bdb2b544881a218625789f | 6e1d770c11a94a4f2d5d6f5212da47c5a92f1d46 | /venv/bin/wheel | 2597eccb6f37b54faebc3ec6b50b96098550937c | [] | no_license | ironstein0/myUIC_portal_automatic_registration | aa76eb8f056d92dfb3e5e3a824c278632a04a394 | cb89aa718f9bf845eeb067efe67b5746a7fbc1e1 | refs/heads/master | 2021-08-23T04:55:44.795736 | 2017-12-03T12:20:48 | 2017-12-03T12:20:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | #!/Volumes/jarvis/projects_working_directory/selenium_projects/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
d32998b192faf56572de67cb5f15dc59afc38c2c | 25b30786850d0e18502813f088b2779c2132b67a | /venv/Scripts/easy_install-3.8-script.py | eccfd79e85962afb201029e0e0a472c1f9ad1e52 | [] | no_license | manisreekar/DetectionOfTwitterBots-MINI_PROJECT | 2389ced3e584cd1e4fd52ce7f9251b91381d76a3 | 71fb0cd7ad394a458676c3e909ea479e57df34bc | refs/heads/master | 2022-11-11T21:07:33.232921 | 2020-06-27T11:41:01 | 2020-06-27T11:41:01 | 274,459,076 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | #!C:\Users\manis\PycharmProjects\MachineLearning-Detecting-Twitter-Bots-master\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| [
"[email protected]"
] | |
b513d8cf0065d3b86ae611a8e704f4ec381c14d8 | 97128c3b69666255f5c8583685256db5c61bbe29 | /connectz.py | 308de00a10806ccd42eb3e362b3554deb2b5f99b | [] | no_license | zeeshanshanu14/Connect4 | 43f8838630ac1cfa2e2ee6f542d09124f50844f7 | 7edf05f6fdbc69e37cbe27e52b8f4dcc6515148b | refs/heads/master | 2020-08-13T23:26:53.317468 | 2019-10-14T13:51:12 | 2019-10-14T13:51:12 | 215,056,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,182 | py | import itertools
import sys
import numpy as np
def execute_player_action(game_board, played_loc, player):
if played_loc > np.shape(game_board)[1] - 1:
print(6)
exit(6)
success_flag = 0
changed_board = game_board[:, played_loc]
# iterate that column in reverse as checker will be stacked to top
for x in range(len(changed_board), 0, -1):
index = x - 1
if not changed_board[index]:
changed_board[index] = player
success_flag = 1
break
if success_flag:
game_board[:, played_loc] = changed_board
# lets send the last location changed to reduce search space of game won
return game_board, (index, played_loc)
else:
# illegal row, return 5 as per test cases
print(5)
exit(5)
def construct_game_board(dim):
return np.zeros((dim[0], dim[1]))
def check_horz_win(game_board, last_played_loc, player, win_length):
row = last_played_loc[0]
col = last_played_loc[1]
counter = 1
    # if the player won horizontally, the placed checker can be anywhere in the
    # winning run, so scan to the right of it first
    for x in range(col + 1, np.shape(game_board)[1]):
        if game_board[row][x] == player:
            counter += 1
            if counter == win_length:
                return player
        else:
            # the run is broken, stop scanning to the right
            break
    # if the placed column is too close to the left edge and nothing was found to
    # the right, a horizontal win is impossible
    if col + 1 < win_length and counter == 1:
        return 0
    # now scan to the left of the placed checker
    for x in range(col - 1, -1, -1):
        if game_board[row][x] == player:
            counter += 1
            if counter == win_length:
                return player
        else:
            return 0
    return 0
def check_ver_win(game_board, last_played_loc, player, win_length):
col = last_played_loc[1]
counter = 1
    # if the player won vertically, their checker will be on top of the column
for x in range(last_played_loc[0] + 1, np.shape(game_board)[0]):
if game_board[x][col] == player:
counter += 1
if counter == win_length:
return player
continue
else:
return 0
def check_diag_win_backslash(game_board, last_played_loc, player, win_length):
col = last_played_loc[1]
row = last_played_loc[0]
# checking backward diagonal
counter = 1
col_forwards = list(range(col, np.shape(game_board)[1]))
row_backwards = list(range(row, 0 - 1, -1))
diag_list = [i for i in itertools.zip_longest(col_forwards, row_backwards)] # zip(col_forwards, row_forwards)
counter = 0
    for col_index, row_index in diag_list:
        # if the player won on this diagonal, the placed checker can be anywhere in the winning run
        if col_index is None or row_index is None:
            # ran off the board on one axis, the diagonal ends here
            break
        if game_board[row_index][col_index] == player:
            counter += 1
            if counter == win_length:
                return player
        else:
            # the run is broken, stop scanning in this direction
            break
    # if the changed location is less than the winning length and nothing was found in the first direction, check the other direction as well
if col < win_length - 1 and counter == 1:
return 0
    # now check from the next diagonal element, as the current cell is already counted
col_backwards = list(range(col - 1, 0 - 1, -1))
row_forwards = list(range(row + 1, np.shape(game_board)[0]))
diag_list_down = [i for i in itertools.zip_longest(col_backwards, row_forwards)]
    # iterate backwards through the columns as we go down the other half of the diagonal
    for col_index, row_index in diag_list_down:
        # if the player won on this diagonal, the placed checker can be anywhere in the winning run
        if col_index is None or row_index is None:
            # ran off the board on one axis, the diagonal ends here
            break
        if game_board[row_index][col_index] == player:
            counter += 1
            if counter == win_length:
                return player
        else:
            # the run is broken, no diagonal win in this direction
            return 0
return 0
def check_diag_win_fslash(game_board, last_played_loc, player, win_length):
col = last_played_loc[1]
row = last_played_loc[0]
# checking backward diagonal down first
col_forwards = list(range(col, np.shape(game_board)[1]))
row_backwards = list(range(row, np.shape(game_board)[0]))
diag_list = [i for i in itertools.zip_longest(col_forwards, row_backwards)] # zip(col_forwards, row_forwards)
counter = 0
for col_index, row_index in diag_list:
        # if the player won on this diagonal, the placed checker can be anywhere in the winning run
if col_index is None or row_index is None:
continue
if game_board[row_index][col_index] == player:
counter += 1
if counter == win_length:
return player
continue
else:
break
    # if the changed location is less than the winning length and nothing was found
    # in the first direction, check the other direction as well; if there is not much
    # left to check and too few matches were found, it is not worth traversing further
if col < win_length and counter == 1:
return 0
# now check up, current cell is counted for above
col_backwards = list(range(col - 1, 0 - 1, -1))
row_backwards = list(range(row - 1, 0 - 1, -1))
diag_list_up = [i for i in itertools.zip_longest(col_backwards, row_backwards)]
    # iterate backwards through the columns as we go up the other half of the diagonal
for col_index, row_index in diag_list_up:
        # if the player won on this diagonal, the placed checker can be anywhere in the winning run
if col_index is None or row_index is None:
continue
if game_board[row_index][col_index] == player:
counter += 1
if counter == win_length:
return player
continue
else:
break
return 0
def check_player_won(game_board, last_played_loc, player, win_length):
winner = check_diag_win_backslash(game_board, last_played_loc, player, win_length) or check_diag_win_fslash(
game_board, last_played_loc, player, win_length) or check_horz_win(game_board, last_played_loc,
player,
win_length) or check_ver_win(
game_board, last_played_loc, player, win_length)
if winner:
return winner
else:
return 3
def play(input_file):
validation_code = validate_file(input_file[0:3])
if validation_code >0 :
return validation_code
game_board = construct_game_board(input_file[0:2])
player = 1
attemps_played_total = 1
wining_length = input_file[2]
winning_player = 0
for col in input_file[3:]:
if winning_player:
return 4
played_loc = col - 1
game_board, chaged_loc = execute_player_action(game_board, played_loc, player)
if attemps_played_total >= input_file[2] * 2 - 1:
winner = check_player_won(game_board, chaged_loc, player, wining_length)
if winner in [1, 2]:
winning_player = winner
continue
if attemps_played_total >= np.size(game_board):
# draw
return 0
# # incomplete game
# return 3
player = 2 if player == 1 else 1
attemps_played_total += 1
if winning_player:
return winning_player
elif attemps_played_total < np.size(game_board):
# incomplete
return 3
def validate_file(meta_data_array):
    if len(meta_data_array) < 3:  # the header needs all three values: two dimensions and the winning length
return 8
row = meta_data_array[0]
col = meta_data_array[1]
winning_length = meta_data_array[2]
if (winning_length > row and winning_length > col) or (row > winning_length and col < 2) or (
col > winning_length and row < 2):
# illegal game
return 7
else:
return 0
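# Example input (sketch; the format is inferred from the np.fromfile parsing below):
# the file holds whitespace-separated integers -- the first three are the two board
# dimensions and the winning length, the rest are 1-based column numbers for the
# alternating moves. For instance
#
#   7 6 4 1 2 1 2 1 2 1
#
# gives player 1 four checkers in column 1, so play() returns 1 and the script prints 1.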
if __name__ == '__main__':
if len(sys.argv) != 2:
exit('connectz.py: Provide one input file')
file_name = sys.argv[1]
try:
data = np.fromfile(file_name, dtype=int, sep=' ')
return_code = play(data)
except IOError:
return_code = 9
print(return_code)
exit(return_code)
| [
"[email protected]"
] | |
68cedabfbc09a63f6c95ab47625817a4d4f29131 | 758e34c96569a52b24454311e263d12ef8b0a4af | /socks/__init__.py | e06450c60206faa84f0c1f4f7289cea508ce922c | [] | no_license | devenjarvis/aws-socks | fde00ef0f9e122121dd62dacd2ff74ac117c8186 | 8e917dcec062bdbfac95b57fd1dbbb9a5f7e810d | refs/heads/master | 2022-12-17T05:36:21.682336 | 2020-09-12T23:56:28 | 2020-09-12T23:56:28 | 279,918,200 | 3 | 1 | null | 2020-09-13T00:07:28 | 2020-07-15T16:19:49 | Python | UTF-8 | Python | false | false | 248 | py | # Core AWS Service abstractions
#import socks.ssm
import boto3
import socks.athena
import socks.cloudwatch
import socks.dynamodb
import socks.s3
import socks.secretsmanager
import socks.sqs
import socks.ssm
import socks.sts
import socks.aws_lambda
| [
"[email protected]"
] | |
795dbd21792c5f06fc2b33e3ded0ba6d473c4161 | bf7959048edc0005e04431a0864c719adc5ea9ea | /python版本/222.CountNodes.py | 484d866b66bbde6d0c8250b82279c24b92434a61 | [] | no_license | Yohager/Leetcode | 7c24f490cfa5fd8e3cdb09e5a2305a134a064a93 | 585af82ff2c2d534053f6886714406019ed0c7d1 | refs/heads/master | 2022-12-07T23:51:16.347174 | 2022-11-28T02:30:53 | 2022-11-28T02:30:53 | 178,201,848 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | # Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def countNodes(self, root: TreeNode) -> int:
if not root:
return 0
lh = 0
l = root
while l:
l = l.left
lh += 1
lb,rb = int(2**(lh-1)), int(2**(lh)-1)
while lb <= rb:
m = lb + (rb - lb) // 2
path = int(2**(lh-2))
node = root
while node and path > 0:
if m & path:
node = node.right
else:
node = node.left
path //= 2
if node:
lb = m + 1
else:
rb = m - 1
return rb | [
"[email protected]"
] | |
56057d7e91a6da371c702cdb1b2e71d854c5a446 | daf9fc80a6a68fbe620af85a3eeebb3f53a593eb | /app/pru.py | e21b70d5b678b3d6506f2dce9b08435278c76eb9 | [
"MIT"
] | permissive | HudsonWerks/visualpru | 9625430616fbe9db16d5f50e561b0c32730e104d | 5bbb44ebb0798a493ad33f4881fa7845e8aeff74 | refs/heads/master | 2021-01-18T18:25:57.332862 | 2014-08-20T18:38:11 | 2014-08-20T18:38:11 | 23,190,525 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,636 | py | from __future__ import print_function
from __future__ import division
import os
import mmap
import struct
import re
import pasm
class PRU:
def __init__(self, id, constants, shared_constants):
self.id = id
self.constants = constants
self.shared_constants = shared_constants
self.f = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
self.pruss_mmap = self.map_memory(self.f, self.shared_constants.PRUSS_RANGE)
self.source_files = []
self.compiled_file = {}
self.errors = []
self.warnings = []
#Pass in the range of addresses we want to cover, and size the mmap accordingly
def map_memory(self,f, memory_range):
if (memory_range[1] - memory_range[0] + 1) % mmap.PAGESIZE == 0:
#The range is fully covered by a multiple of the pagesize
multiple = (memory_range[1] - memory_range[0] + 1) // mmap.PAGESIZE
else:
#Since the page size is not evenly divisible by the range, we need to round up to the next page size to fully cover the entire range
multiple = 1 + ((memory_range[1] - memory_range[0] + 1) // mmap.PAGESIZE)
mm = mmap.mmap(f, mmap.PAGESIZE * multiple, offset=memory_range[0])
return mm
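    # Worked example of the rounding above (illustrative numbers, not from the
    # original source): with mmap.PAGESIZE == 4096, a range of
    # (0x4A300000, 0x4A3027FF) spans 0x2800 bytes, i.e. 2.5 pages, so
    # multiple becomes 1 + (0x2800 // 0x1000) == 3 and three full pages are
    # mapped starting at the range's base offset.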
def unmap_memory(self):
self.pruss_mmap.close()
os.close(self.f)
def read_register(self,register_block_offset,register_offset,byte_count):
r = self.pruss_mmap[register_block_offset+register_offset:register_block_offset+register_offset+byte_count]
return struct.unpack("<L",r)[0]
def write_register(self,register_block_offset,register_offset,byte_count,value):
packed_value = struct.pack("<L",value)
self.pruss_mmap[register_block_offset+register_offset:register_block_offset+register_offset+byte_count] = packed_value
def is_running(self):
r = self.read_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4)
if (r & (1<<self.shared_constants.RUNSTATE_BIT)) == 0:
return False
else:
return True
def reset(self,value=None):
r = self.read_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4)
#Clear bit to trigger a reset
r &= ~(1<<self.shared_constants.SOFT_RST_N_BIT)
#Set a start instruction if specified, otherwise just jump to the default
if value is not None:
#Clear the current counter value and set the new one
r &= ~(0xFFFF<<self.shared_constants.PCOUNTER_RST_VAL_BIT)
r |= ((value & 0xFFFF)<<self.shared_constants.PCOUNTER_RST_VAL_BIT)
self.write_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4,r)
def set_singlestep_mode(self):
r = self.read_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4)
r |= (1<<self.shared_constants.SINGLE_STEP_BIT)
self.write_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4,r)
def set_freerunning_mode(self):
r = self.read_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4)
r &= ~(1<<self.shared_constants.SINGLE_STEP_BIT)
self.write_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4,r)
def get_run_mode(self):
r = self.read_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4)
if r&(1<<self.shared_constants.SINGLE_STEP_BIT) != 0:
mode = 'step'
else:
mode = 'continuous'
return mode
#TODO: Also check to see if the PRU is asleep
def get_status(self):
if self.is_running():
status = 'running'
else:
status = 'halted'
return status
def get_program_counter_value(self):
r = self.read_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.STATUS_OFFSET,4)
return r & 0xFFFF
def halt(self):
r = self.read_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4)
r &= ~(1<<self.shared_constants.ENABLE_BIT)
self.write_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4,r)
def run(self):
r = self.read_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4)
r |= (1<<self.shared_constants.ENABLE_BIT)
self.write_register(self.constants.PRUSS_PRU_CTRL_OFFSET,self.shared_constants.CONTROL_OFFSET,4,r)
def write_opcodes_to_iram(self,opcodes):
for i,opcode in enumerate(opcodes):
self.write_register(self.constants.PRU_IRAM_OFFSET,i*4,4,opcode)
def get_gpreg_value(self,number):
return self.read_register(self.constants.PRU_ICSS_PRU_DEBUG_OFFSET,self.shared_constants.GPREG_OFFSET+number*4,4)
def get_gpreg_values(self):
registers = []
for i in range(self.shared_constants.GPREG_COUNT):
registers.append({'name' : 'r'+str(i), 'value': self.get_gpreg_value(i)})
return registers
def compile_and_upload_program(self, compilation_directory, source_files):
#Create the PRU directory structure if it doesn't exist
program_directory = os.path.join(compilation_directory,self.id)
if not os.path.exists(program_directory):
os.makedirs(program_directory)
#Create the source files and store an internal copy
for source_file in source_files:
with open(os.path.join(program_directory,source_file['name']),'w') as f:
f.write(source_file['content'])
self.source_files = source_files
#Get the primary filename, which is the first file in the array by convention
#TODO: Make it an entry in the object? eg. check for a 'primary':true flag
primary_filename = source_files[0]['name']
#Compile the source files
errors, warnings = pasm.compile(program_directory,primary_filename)
self.errors = errors
self.warnings = warnings
#Write to PRU memory if there are no errors
if not errors:
            #A source file is guaranteed to have a '.p' or '.hp' extension, so we can rely on string substitution
compiled_filename = re.sub('(?:\.p)$|(?:\.hp)$','.lst',primary_filename)
#Write the program to memory
            #TODO: Confirm that the combined size of the opcodes is less than the PRU's IRAM...
# : len(opcodes) * 4 <= self.shared_constants.MAX_IRAM_SIZE where 4 represents the bytesize of each opcode
opcodes, instructions = pasm.parse_compiler_output(program_directory,compiled_filename)
self.write_opcodes_to_iram(opcodes)
self.compiled_file = {'name':compiled_filename,'content':instructions}
    #NOTE: This function returns a dictionary object with state information in a format that mirrors the front-end model state
def get_state(self):
pru = {}
pru['id'] = self.id
pru['state'] = {'programCounter':self.get_program_counter_value(), 'status': self.get_status(), 'runMode': self.get_run_mode()}
pru['program'] = {
'sourceFiles' : self.source_files,
'compiledFile' : self.compiled_file,
'errors' : self.errors,
'warnings' : self.warnings
}
pru['memory'] = {'generalPurpose':self.get_gpreg_values()}
return pru | [
"[email protected]"
] | |
98044cdd44063bb110f6b637a8192e571ea4c3c2 | 4e362157246306f095a904dced51e16a87d82b79 | /ex14.py | bef360bade2068ea51a670f3a968cc3071249a70 | [] | no_license | tejaveturi/python | c17fd605c0da6123f0b22d4d70aabee96edd4187 | 83c117f2d6f6ffc25da35ba2445c4ff3c9afd723 | refs/heads/master | 2021-01-11T13:47:37.035056 | 2017-05-19T01:25:35 | 2017-05-19T01:25:35 | 86,526,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | from sys import argv
script, user_name = argv
prompt = '>'
print "Hi %s I am %s" % (user_name, script)
print" where do you live %s" %user_name
live = raw_input(prompt)
print " do you like me %s " % user_name
like = raw_input(prompt)
print "what computer do you have %s" % user_name
computer = raw_input(prompt)
print """ your name is %s,
you said %s about liking,
you live in %s,
you have %s computer""" % (user_name, like, live, computer)
| [
"[email protected]"
] | |
a0500d0291da3fd04b167a482f55b0cfb912567c | 5ea8be3085b173a5fa006a2d9538ea5d92d08957 | /py/settings.py | 342080897813d9c04afbb952e4d8ffaa376bb3b8 | [] | no_license | amirgraily7/PDFMetadataExtractor | c01d3ab6535edb5a4080a76e3aac1f2d1beb5be3 | c15290c44376fcdaa1a1597478f4d27fe44ab45c | refs/heads/master | 2021-05-31T07:56:13.479570 | 2016-02-28T02:15:38 | 2016-02-28T02:15:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,780 | py | import yaml
import collections
import os
import importlib
import pattern_builder
import re
class Settings:
"""Represent settings and do related things.
Settings are stored in a YAML
"""
def __init__(self, filename=None):
"""Load settings from a file.
Loads the settings from the YAML file given by filename (or a default path
if none is specified). Initialize all fields and the pattern builder.
Store all files and directories as absolute paths.
"""
if filename is None:
filename = self.default_file()
self.filename = filename
self._load_from_file()
self.pattern_builder = pattern_builder.PatternBuilder(self._data['substitutions'])
self._load_fields()
self._set_files()
self._set_directories()
self._extra_labels = self._data['extra_labels']
self.test_proportion = self._data['test_proportion']
def _load_from_file(self):
"""Load the settings from the given filename."""
with open(self.filename, 'r') as f:
            self._data = yaml.safe_load(f)
def session(self):
"""Get a SQLAlchemy session object for the database specified."""
import sqlalchemy.orm
maker = sqlalchemy.orm.sessionmaker(bind=self.engine())
return maker()
def engine(self):
"""Get a SQLAlchemy engine object for the specified database."""
import sqlalchemy
db = self._data['db']
address = "%s://%s:%s@%s:%d/%s" % (db['backend'], db['username'],
db['password'], db['server'],
db['port'], db['name'])
if "charset" in db:
address += "?charset=%s" % db['charset']
return sqlalchemy.create_engine(address)
def _set_files(self):
"""Get filenames from settings dictionary and store absolute paths."""
files = collections.defaultdict(dict, self._data['files'])
self._files = {key: self.resolve_path(value)
for key, value in files.iteritems()}
def _set_directories(self):
"""Store absolute paths for directories."""
directories = collections.defaultdict(dict, self._data['directories'])
self._directories = {key: self.resolve_path(value)
for key, value in directories.iteritems()}
def resolve_path(self, path):
"""Convert a filename from the settings file to an absolute path.
Absolute paths are left as is. Relative paths are assumed to be
relative to the settings file.
"""
settings_file = self.filename
if not os.path.isabs(path):
return os.path.join(os.path.split(settings_file)[0], path)
else:
return path
def default_file(self):
"""A default location for the settings YAML file."""
return os.path.abspath("../settings.yml")
def substitutions(self):
"""The allowable substitutions to be used when generating patterns."""
return self._data['substitutions']
def get_directory(self, name):
"""Retrieve the absolute path for a directory."""
return self._directories[name]
def get_file(self, name):
"""Retrieve the absolute path for a file."""
return self._files[name]
def _load_fields(self):
"""Load fields specified in the settings file."""
self.fields = {}
for name in self._data['fields']:
info = self._data['fields'][name]
if 'disabled' not in info or not info['disabled']:
module = importlib.import_module(info['module'])
cls = info['class']
func = getattr(module, cls)
params = info.get('parameters', {})
self.fields[name] = func(self, name, info, **params)
def load_labels(self):
"""Load correct metadata labels from a YAML file.
This is called when populating the database.
"""
with open(self.get_file('label'), "r") as f:
            return yaml.safe_load(f)
# TODO: the following would probably fit better somewhere else
def strip_labels(self, text):
""" Remove all field labels from some text.
:param text: A string from which to remove labels.
:return: A list of strings formed by removing labels from the text.
"""
labels = sum([field.labels for field in self.fields.values()], self._extra_labels)
pattern = self.pattern_builder.list_pattern(labels)
if pattern is None:
return text
return re.split(pattern, text)
def map_tables(self):
""" Map the Document, Box, and Line classes to their SQL tables."""
from schema import document_table, box_table, line_table
from sqlalchemy import MetaData
from sqlalchemy.orm import mapper, relationship
from pdf_classes import Document, Box, Line
metadata = MetaData()
mapper(Document, document_table(self.fields, metadata),
properties={'boxes': relationship(Box, back_populates='document'),
'lines': relationship(Line, back_populates='document')
})
mapper(Box, box_table(metadata),
properties={'document': relationship(Document, back_populates='boxes'),
'lines': relationship(Line, back_populates='box')
})
mapper(Line, line_table(metadata),
properties={'document': relationship(Document, back_populates='lines'),
'box': relationship(Box, back_populates='lines')
})
return metadata
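# Minimal usage sketch (not part of the original module; "settings.yml" is an
# assumed example path and any fields/database details come from that file):
#
#   settings = Settings("settings.yml")   # or Settings() for the default path
#   settings.map_tables()                 # map Document/Box/Line to their SQL tables
#   session = settings.session()          # SQLAlchemy session for the configured DB
#   print(sorted(settings.fields.keys()))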
| [
"[email protected]"
] | |
c4e5a95d799181a421ae69c2a2c1605b7a024145 | a932f09ba6c9f5d6086ed70f7311fca41b77ef92 | /wbManager/__init__.py | c4b70a893c318cc719c4b275c53b5de3db692727 | [] | no_license | N0taN3rd/pywbModules | 446b2d69ed4d518e1b439b50d5a61019f8639b5c | d456e3c0006e074dfcc770b4193bd3aa59b22b31 | refs/heads/master | 2021-01-20T21:12:07.433132 | 2016-08-27T20:49:39 | 2016-08-27T20:49:39 | 65,776,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | import sys, os
if getattr(sys, 'frozen', False):
# we are running in a bundle
frozen = 'ever so'
bundle_dir = sys._MEIPASS
DEFAULT_CONFIG = os.path.join(bundle_dir,'default_config.yaml')
else:
DEFAULT_CONFIG = 'wbManager/default_config.yaml'
def get_test_dir():
if getattr(sys, 'frozen', False):
# we are running in a bundle
frozen = 'ever so'
bundle_dir = sys._MEIPASS
return os.path.join(os.path.dirname(os.path.realpath(bundle_dir)),
'sample_archive') + os.path.sep
else:
return os.path.join(os.path.dirname(os.path.realpath(__file__)),
'sample_archive') + os.path.sep
| [
"[email protected]"
] | |
b260f208bb5d54f3c60ebdd94ef44c20c976b2dc | 16df28be8eafbb07384c7a1ad7bd686c7e392382 | /dispersion_functions.py | c6be3a7db23e005129e3f53724dfade59aa1eb4e | [] | no_license | kd891/TuringPatterns_MRes | a81db5acbb41a7903dc477ad4e46b55dc28e6f66 | 262cb96b28e326e8249c09b037590c1a2a258397 | refs/heads/main | 2023-07-13T09:39:18.094876 | 2021-08-23T22:42:47 | 2021-08-23T22:42:47 | 399,262,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | '''
This file contains all the functions required to perform the linear stability analysis
1) Finding the steady state values
2) Harmonic diagonal perturbation of the steady state in the presence of diffusion
'''
import scipy
from scipy import optimize
import numpy as np
'''
Steady State: Powell's dogleg method
N.B. we have to call ss.x for the output and ss.result for the condition
'''
def steady_state(func, x0, k, jacobian):
ss = optimize.root(func,x0, args=k, jac=jacobian)
return ss
def ss_newton(func, x0, k):
ss = scipy.optimize.newton(func, x0, fprime=None, args=(k,))
return ss
'''
Harmonic perturbation
'''
def DispRel_four(wvn, jac, D_A=0.01, D_B=0.01, D_C=0.4, D_D=0.4):
jac[0, 0] += -D_A*wvn**2
jac[1, 1] += -D_B*wvn**2
jac[2, 2] += -D_C*wvn**2
jac[3, 3] += -D_D*wvn**2
eigval = np.linalg.eig(jac)
return eigval
def DispRel_two(wvn, jac, D_A=0.01, D_B=0.4):
jac[0, 0] += -D_A*wvn**2
jac[1, 1] += -D_B*wvn**2
eigval = np.linalg.eig(jac)
return eigval
def DispRel_three(wvn, jac, D_A=0.01, D_B=0.4, D_C=0.4):
jac[0, 0] += -D_A*wvn**2
jac[1, 1] += -D_B*wvn**2
jac[2, 2] += -D_C*wvn**2
eigval = np.linalg.eig(jac)
return eigval
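# Minimal usage sketch (not part of the original module). The two-variable
# Schnakenberg-style kinetics below are an assumed example rather than this
# project's model; they only illustrate the flow described in the module
# docstring: find a steady state, then perturb it over a range of wavenumbers.
if __name__ == '__main__':
    def _demo_f(x, a, b):
        # reaction terms f(u, v) for the assumed kinetics
        return np.array([a - x[0] + x[0] ** 2 * x[1],
                         b - x[0] ** 2 * x[1]])

    def _demo_jac(x, a, b):
        # analytic Jacobian of _demo_f with respect to (u, v)
        return np.array([[-1.0 + 2.0 * x[0] * x[1], x[0] ** 2],
                         [-2.0 * x[0] * x[1], -x[0] ** 2]])

    demo_k = (0.1, 0.9)
    ss = steady_state(_demo_f, np.array([1.0, 1.0]), demo_k, _demo_jac)
    for wvn in np.linspace(0.0, 5.0, 11):
        eigvals, _ = DispRel_two(wvn, _demo_jac(ss.x, *demo_k))
        # a positive real part at wvn > 0 would indicate a Turing instability
        print(wvn, eigvals.real.max())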
| [
"[email protected]"
] | |
ff360813a0192800928a486df1db909c9c890603 | e2f5a314f8f3c03acf6452c7648958632b443179 | /job/migrations/0003_job_description.py | d9291a0828b7554e83bc1ece159b19be467c6c01 | [] | no_license | Anas-Darwish-SB/django-job-board | 8c82cff929ddcba0f23cd592aff03e81a11f9dc9 | 3d65320217d96b6914cbae0ab43a886cd352e1f0 | refs/heads/main | 2023-03-01T15:32:54.378367 | 2021-02-05T07:20:50 | 2021-02-05T07:20:50 | 335,994,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # Generated by Django 3.1.6 on 2021-02-04 17:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0002_job_job_type'),
]
operations = [
migrations.AddField(
model_name='job',
name='description',
field=models.TextField(default='', max_length=1000),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
f047285cfd37ada2200725af048a79b49b00a69c | 13c22985b2062d29e18885f36e64346fad7f63e3 | /advertisement/serializers.py | 5a064c8ab3ebf2333903959be6f4b1bdcb62a25b | [] | no_license | zozoh94/GivagoAPI | b10e3af40450c5576f25c1de2d3507dccbf0edde | 43913479bc1fac9d83d59ada9f829bc431c6b356 | refs/heads/master | 2020-12-25T14:58:51.643452 | 2017-06-13T11:42:50 | 2017-06-13T11:42:50 | 66,844,100 | 0 | 0 | null | 2019-10-22T20:44:31 | 2016-08-29T13:02:25 | Python | UTF-8 | Python | false | false | 3,103 | py | from rest_framework import serializers
from embed_video.backends import detect_backend, UnknownBackendException, UnknownIdException
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from taggit_serializer.serializers import (TagListSerializerField,
TaggitSerializer)
from django.utils.translation import ugettext_lazy as _
from .models import Ad
from .models import App
from sponsor.models import Sponsor
from sponsor.models import SponsorManager
from sponsor.serializers import SponsorSerializer
class VideoSerializer(serializers.BaseSerializer):
def to_representation(self, obj):
video = detect_backend(obj)
return {
'url': obj,
'url_embed': video.get_url(),
'thumbnail': video.get_thumbnail_url()
}
def to_internal_value(self, data):
try:
            backend = detect_backend(data)
backend.get_code()
except UnknownBackendException:
raise serializers.ValidationError(_(u'URL could not be recognized.'))
except UnknownIdException:
raise serializers.ValidationError(_(u'ID of this video could not be '
u'recognized.'))
return data
class AdSerializer(TaggitSerializer, serializers.ModelSerializer):
video = VideoSerializer(required=True)
author = serializers.ReadOnlyField(source='author.user.username')
sponsor_url = serializers.HyperlinkedRelatedField(view_name='sponsor-detail', read_only=True, source='sponsor')
sponsor = serializers.PrimaryKeyRelatedField(queryset=Sponsor.objects.all(), write_only=True, required=True)
tags = TagListSerializerField(required=False, read_only=True)
class Meta:
model = Ad
fields = ('id', 'url', 'name', 'video', 'author', 'sponsor', 'sponsor_url', 'tags')
def validate_sponsor(self, value):
if value is not None:
try:
manager_in_sponsor = value.managers.all()
except SponsorManager.DoesNotExist:
raise serializers.ValidationError("Sponsor don't have any manager.")
try:
current_manager = self.context['request'].user.sponsormanager
except SponsorManager.DoesNotExist:
raise serializers.ValidationError("You're not a manager.")
if current_manager in manager_in_sponsor or self.context['request'].user.is_superuser:
return value
else:
raise serializers.ValidationError("You can't assign this sponsor to the advertisement. You're not a manager of this sponsor.")
else:
return value
class AdDetailSerializer(AdSerializer):
sponsor_detail = SponsorSerializer(read_only=True, source='sponsor')
class Meta:
model = Ad
fields = ('url', 'name', 'video', 'author', 'sponsor', 'sponsor_detail', 'tags', 'number_views', 'number_views_different_user')
class AppSerializer(serializers.ModelSerializer):
class Meta:
model = App
fields = ('id', 'name', 'link', 'thumbnail')
| [
"[email protected]"
] | |
8f4c6030c1dc333a1e1776bfe77cbc9ba9f0bcf1 | db9cc680a60997412eae035b257cc77efbcdcb06 | /py3/leetcodeCN/competition/2020/2020-3-1-cp-3.py | 080ce7ceb567a3597e3ddf364c513a2eb84a902e | [] | no_license | Azson/machineLearning | 9630b62c73b2388a57c630644dae3ffa8e4db236 | 35662ddf39d322009f074ce8981e5f5d27786819 | refs/heads/master | 2022-05-06T07:03:23.543355 | 2021-08-20T14:57:25 | 2021-08-20T14:57:25 | 179,935,258 | 3 | 3 | null | 2019-11-04T14:26:51 | 2019-04-07T08:07:08 | Python | UTF-8 | Python | false | false | 1,398 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isSubPath(self, head, root):
"""
:type head: ListNode
:type root: TreeNode
:rtype: bool
"""
dt = dict()
def dfs(now_node, to_search):
if now_node.val == to_search.val:
to_search = to_search.next
if to_search is None:
return True
else:
to_search = head
if now_node in dt and to_search in dt[now_node]:
return False
f = False
if now_node.left:
f |= dfs(now_node.left, to_search)
if not f and to_search != head:
f |= dfs(now_node.left, head)
if not f and now_node.right:
f |= dfs(now_node.right, to_search)
if not f and to_search != head:
f |= dfs(now_node.right, head)
if now_node not in dt:
dt[now_node] = dict()
dt[now_node][to_search] = f
return f
return dfs(root, head) | [
"[email protected]"
] | |
c402d92be0fefde5dce06ba3d84f6f68b959e4b1 | 031be4933b53a382bf5573e1a8c3259ce7be6435 | /models/tag.py | 96b3562a6d2fdba334272961f4e9327a31128dfc | [] | no_license | ScarlettSamantha/depricated | 8f6241bf3d357c1d5a9f561110da0a3d3604bc85 | 0f4e43398a4a595333371b12bd37cc145bf1ce0e | refs/heads/master | 2023-02-20T14:30:19.715146 | 2018-07-27T14:33:20 | 2018-07-27T14:33:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from uuid import uuid4
from helpers.uuid import UuidField
from TransHelp import db
from datetime import datetime
class Tag(db.Model):
id = db.Column(UuidField, unique=True, nullable=False, default=uuid4, primary_key=True)
name = db.Column(db.String(255), unique=False, nullable=False)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
date_updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow) | [
"[email protected]"
] | |
f3d2d04de7d85a941f4cd9cf4d9eea6843ec645f | d86305121299d4827faa19f3d5d7176e6764f9bc | /modules/bbc/chain/wallet.py | 6e6b7c4009b2328617c499b54e9f82c6ab08a9a6 | [
"MIT"
] | permissive | pdinkins/pythos | af5d07e7339dc7e39def66d290d7bc54bf1f3713 | 7cca6ae94090f5179d3e108ded78559c0775535a | refs/heads/master | 2020-03-21T12:27:21.031224 | 2018-07-11T17:01:49 | 2018-07-11T17:01:49 | 138,553,400 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,947 | py | # Wallet
# stores all functions relating to the user hash wallet
# dyanmically stores data for wallet currently in use
current_wallet = []
wallet_data = []
_system_architecture_dyna = []
# dumps all dynamic data
def dynamic_data_dump():
current_wallet.clear()
wallet_data.clear()
_system_architecture_dyna.clear()
class NewWallet:
def __init__(self):
self.timestamp = self.time_stamp()
self.usr_nym = self.user_nym()
self.node_build = self.user_build()
self.id = self.generate_user_id()
def time_stamp(self):
import datetime
return datetime.datetime.now()
def user_nym(self):
usernym = input('This info will be used to name the local wallet file.\nuser_nym> ')
return usernym
def user_build(self):
# for testing the current local build
# initial import
try:
import os, sys
import platform
import datetime
import subprocess
import requests
import time
import pynetwork.ipfs as ipfs
#from classes import User, Idea
#import bloacks, bloacks, chain, client
#import generate, menu, writer, ledger
except:
print('FATAL__BUILD__ERROR')
error = sys.exc_info()
print(error)
print(sys.exc_info()[0])
raise
try:
# current cpu system configuration
log('0_SYSTEM_CONFIG')
self._0_node_ip = requests.get('http://ip.42.pl/raw').text
log(self._0_node_ip)
DEBUG_headers = False
if DEBUG_headers == True:
                self._0_node_config = requests.get('http://ip.42.pl/headers').text
                log(self._0_node_config)
            else:
                self._0_node_config = None
                log('DEBUG_headers = ' + str(DEBUG_headers))
self._system_architecture = platform.uname()
log(self._system_architecture)
self.node = platform.platform()
log(self.node)
self._python_build = platform.python_build()
log(self._python_build)
self._system = platform.system()
log(self._system)
self._python_compiler = platform.python_compiler()
log(self._python_compiler)
log('0_SYSTEM_CONFIGFILE')
self.n0osd = [
self._0_node_ip,
self._0_node_config,
self._system_architecture,
self.node,
self._python_build,
self._system,
self._python_compiler
]
return self.n0osd
except:
print(datetime.datetime.now(), 'SYSTEM LOG')
error = sys.exc_info()
print(error)
print(sys.exc_info()[0])
raise
def generate_user_id(self):
import hashlib
wid = hashlib.sha256()
wid.update(str(self.timestamp).encode('utf=8') +
str(self.usr_nym).encode('utf-8') +
str(self.node_build).encode('utf-8'))
return wid.hexdigest()
def generate_new_wallet():
wallet = NewWallet()
sys_arc = wallet.user_build()
builder(wallet.id, wallet.usr_nym, sys_arc)
wd = [wallet.id, wallet.usr_nym, sys_arc]
for i in range(0, len(wd)):
log(wd[i])
set_current_wallet(wd)
def builder(id, nym, arc):
try:
import pynetwork.ipfs as ipfs
ipfs.initialize_ipfsapi()
config_file = open('config_0_node.txt', 'w')
for i in range(0, len(arc)):
config_file.writelines(arc[i])
config_file.close()
ipfs.add_file('config_0_node.txt')
except:
print('__BUILD__FILE__IPFS__ERROR')
import sys
error = sys.exc_info()
print(error)
print(sys.exc_info()[0])
raise
def set_current_wallet(rgw):
try:
d = str(input('Use recently generated wallet as current usable hash wallet [y/n]? >')).lower()
#fill dynamic storage list
if d == 'y':
for i in range(0, len(rgw)):
current_wallet.append(rgw[i])
            log('Successfully set the current usable wallet')
# pipe back to client interface
elif d == 'n':
aus = str(input('are you sure [y/n]? > ')).lower()
if aus == 'y':
# pipe to client interface
print('Did not set current usable wallet')
elif aus == 'n':
for i in range(0, len(rgw)):
current_wallet.append(rgw[i])
else:
raise TypeError
else:
raise TypeError
except TypeError:
print("ERROR: Not a valid input")
return
def print_cw():
for i in range(0, len(current_wallet)):
print(current_wallet[i])
class WalletFile:
def __init__(self):
self.wfn = self.gwfn()
self.walletfile = self.generate_nwf()
def gwfn(self):
# relay function for future security checks on wallet
file_nym = self.cwe()
return file_nym
# checks if current wallet exists and generates if not
def cwe(self):
if not current_wallet:
print('No current usable wallet')
try:
gen = str(input('Generate new wallet [y/n]? >')).lower()
if gen == 'y':
generate_new_wallet()
print('Test statement for flow..........')
elif gen == 'n':
# pipe to function to set wallet from file
pass
else:
raise TypeError
except TypeError:
print('ERROR: Invalid input')
elif current_wallet:
w_nym = current_wallet[0]
wfn = w_nym + '.csv'
return wfn
def generate_nwf(self):
import csv
open(self.wfn, mode='w')
def write_wallet():
wallet_f = WalletFile()
name = wallet_f.wfn
import csv
with open(name, 'a', newline='') as wallet:
writer = csv.writer(wallet)
writer.writerow([current_wallet[0],
current_wallet[1],
current_wallet[2]])
def write_cwf():
import csv
try:
with open ('main.csv', 'w', newline='') as cwallet:
writer = csv.writer(cwallet)
writer.writerow([current_wallet[0],
current_wallet[1],
current_wallet[2]])
except FileNotFoundError:
open('main.csv', 'w')
return
def parse_wallet():
try:
wallet_data.clear()
import csv
walletfile = str(input('walletfile> '))
with open(walletfile) as wallet:
reader = csv.reader(wallet)
for row in reader:
wallet_data.append(row[0])
wallet_data.append(row[1])
wallet_data.append(row[2])
except FileNotFoundError:
print('ERROR: WALLET__NOT__FOUND')
def log(msg):
wallet_auto_log(msg)
def wallet_auto_log(message):
import inspect, logging
import datetime as dt
# Get the previous frame in the stack, otherwise it would
# be this function!!!
func = inspect.currentframe().f_back.f_code
# Dump the message + the name of this function to the log.
logging.debug("{}\t{}\t{}\t{}".format(
dt.datetime.now(),
func.co_filename,
func.co_name,
message
))
debug_menu = False
'''
import app.menu as m
md = {'new wallet': generate_new_wallet,
'print current wallet info': print_cw,
'write wallet': write_wallet,
'quit': m.quit_menu}
while debug_menu:
m.initialize_menu(md, 'title')
''' | [
"[email protected]"
] | |
531cdbf271adab4283793689ac212bbf31ec34ca | f1e1dd0a7c99045c369700bb912f27b1ef594c38 | /reviews/migrations/0001_initial.py | 923b221732ca647790232f6712b268d5cd540de5 | [] | no_license | gamitarchana/cabdemo1 | 5a362cc90a4fe373a1a3e3d0c522b5bb4d664593 | b9243635716dfa600aeb6d70679022f7b9af9482 | refs/heads/master | 2020-05-29T14:21:10.623072 | 2019-05-29T09:19:58 | 2019-05-29T09:19:58 | 189,192,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | # Generated by Django 2.1.8 on 2019-05-20 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('outstation', '0001_initial'),
('login', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('review_comments', models.TextField(help_text='Add Review')),
('rating', models.PositiveSmallIntegerField(blank=True, default=0, null=True)),
('publish_date', models.DateTimeField(auto_now_add=True)),
('route', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='page_review', to='outstation.OutstationRoutePage')),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_review', to='login.UserProfile')),
],
),
migrations.CreateModel(
name='ReviewImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='reviews/images/')),
('upload_date', models.DateTimeField(auto_now_add=True)),
('review', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_image', to='reviews.Review')),
],
),
migrations.CreateModel(
name='ReviewVideo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('video', models.FileField(upload_to='reviews/videos/')),
('upload_date', models.DateTimeField(auto_now_add=True)),
('review', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_video', to='reviews.Review')),
],
),
]
| [
"[email protected]"
] | |
8ae8ee1ac0d9a92d5f5173c5c6da8ae64d37fb07 | a65f6e9eba9fabea9b62b967e057180efe363b1d | /crm/models.py | 1f9ff471d1b30ca9d910848e38e4e7a573f5b283 | [] | no_license | SiriChandanaGoparaju/Foodservice | 7c5f4627b4e7f9d208d76350b2805e09a8edfeb5 | 57fd12111ed8275c3e52ea47563d3776aedaca51 | refs/heads/master | 2020-03-28T18:55:31.051779 | 2018-10-01T01:57:44 | 2018-10-01T01:57:44 | 148,927,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,627 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Customer(models.Model):
cust_name = models.CharField(max_length=50)
organization = models.CharField(max_length=100, blank=True)
role = models.CharField(max_length=100)
email = models.EmailField(max_length=100)
bldgroom = models.CharField(max_length=100)
address = models.CharField(max_length=200)
account_number = models.IntegerField(blank=False, null=False)
city = models.CharField(max_length=50)
state = models.CharField(max_length=50)
zipcode = models.CharField(max_length=10)
phone_number = models.CharField(max_length=50)
created_date = models.DateTimeField(
default=timezone.now)
updated_date = models.DateTimeField(auto_now_add=True)
def created(self):
self.created_date = timezone.now()
self.save()
def updated(self):
self.updated_date = timezone.now()
self.save()
def __str__(self):
return str(self.cust_name)
class Service(models.Model):
cust_name = models.ForeignKey(Customer, on_delete=models.CASCADE, related_name='services')
service_category = models.CharField(max_length=100)
description = models.TextField()
location = models.CharField(max_length=200)
setup_time = models.DateTimeField(
default=timezone.now)
cleanup_time = models.DateTimeField(
default=timezone.now)
service_charge = models.DecimalField(max_digits=10, decimal_places=2)
created_date = models.DateTimeField(
default=timezone.now)
updated_date = models.DateTimeField(auto_now_add=True)
def created(self):
self.acquired_date = timezone.now()
self.save()
def updated(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.cust_name)
class Product(models.Model):
cust_name = models.ForeignKey(Customer, on_delete=models.CASCADE, related_name='products')
product = models.CharField(max_length=100)
p_description = models.TextField()
quantity = models.IntegerField()
pickup_time = models.DateTimeField(
default=timezone.now)
charge = models.DecimalField(max_digits=10, decimal_places=2)
created_date = models.DateTimeField(
default=timezone.now)
updated_date = models.DateTimeField(auto_now_add=True)
def created(self):
self.acquired_date = timezone.now()
self.save()
def updated(self):
self.recent_date = timezone.now()
self.save()
def __str__(self):
return str(self.cust_name)
| [
"[email protected]"
] | |
347cde7559e5110cbe0a2f1da98c7276860f087f | ebf934fb6fd4e0ebbd870db857897fbb9d8022b7 | /test/nlp/hanLP_test.py | bd09b95b9eab1c22f831b1bfdcf0b9498d7ff034 | [] | no_license | AidenLong/ai | 6ce2bcf5928f8350ba8b440e9032ea4c39dd69ec | 0901e6010bbb51a165680e52d9adaeec7e510dc1 | refs/heads/master | 2020-05-03T13:27:38.698490 | 2019-08-22T03:18:09 | 2019-08-22T03:18:09 | 178,653,209 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,430 | py | # -*- coding:utf-8 -*-
# Import the Java execution environment; JPype is a toolkit that lets us run Java code from Python
from jpype import *
# startJVM(getDefaultJVMPath(), "-ea")
# java.lang.System.out.println("Hello World")
# shutdownJVM()
# Start the JVM; on Linux, replace the semicolons ; in the classpath with colons :
startJVM(getDefaultJVMPath(), "-Djava.class.path=D:\syl\dev\hanlp-1.7.4-release\hanlp-1.7.4.jar;D:\syl\dev\hanlp-1.7.4-release",
"-Xms1g",
"-Xmx1g")
# Once the JVM has started, Java statements can be executed
print("=" * 30 + "HanLP分词" + "=" * 30)
# Initialize a Java class
HanLP = JClass('com.hankcs.hanlp.HanLP')
# Chinese word segmentation
print(HanLP.segment('你好,欢迎在Python中调用HanLP的API'))
print("-" * 70)
print("=" * 30 + "标准分词" + "=" * 30)
StandardTokenizer = JClass('com.hankcs.hanlp.tokenizer.StandardTokenizer')
print(StandardTokenizer.segment('你好,欢迎在Python中调用HanLP的API'))
print("-" * 70)
# NLP tokenization: NLPTokenizer performs full named entity recognition and part-of-speech tagging
print("=" * 30 + "NLP分词" + "=" * 30)
NLPTokenizer = JClass('com.hankcs.hanlp.tokenizer.NLPTokenizer')
print(NLPTokenizer.segment('中国科学院计算技术研究所的宗成庆教授正在教授自然语言处理课程'))
print("-" * 70)
print("=" * 30 + "索引分词" + "=" * 30)
IndexTokenizer = JClass('com.hankcs.hanlp.tokenizer.IndexTokenizer')
termList = IndexTokenizer.segment("主副食品");
for term in termList:
print(str(term) + " [" + str(term.offset) + ":" + str(term.offset + len(term.word)) + "]")
print("-" * 70)
print("=" * 30 + " CRF分词" + "=" * 30)
print("-" * 70)
print("=" * 30 + " 极速词典分词" + "=" * 30)
SpeedTokenizer = JClass('com.hankcs.hanlp.tokenizer.SpeedTokenizer')
print(NLPTokenizer.segment('江西鄱阳湖干枯,中国最大淡水湖变成大草原'))
print("-" * 70)
print("=" * 30 + " 自定义分词" + "=" * 30)
CustomDictionary = JClass('com.hankcs.hanlp.dictionary.CustomDictionary')
CustomDictionary.add('攻城狮')
CustomDictionary.add('单身狗')
HanLP = JClass('com.hankcs.hanlp.HanLP')
print(HanLP.segment('攻城狮逆袭单身狗,迎娶白富美,走上人生巅峰'))
print("-" * 70)
print("=" * 20 + "命名实体识别与词性标注" + "=" * 30)
NLPTokenizer = JClass('com.hankcs.hanlp.tokenizer.NLPTokenizer')
print(NLPTokenizer.segment('中国科学院计算技术研究所的宗成庆教授正在教授自然语言处理课程'))
print("-" * 70)
document = "水利部水资源司司长陈明忠9月29日在国务院新闻办举行的新闻发布会上透露," \
"根据刚刚完成了水资源管理制度的考核,有部分省接近了红线的指标," \
"有部分省超过红线的指标。对一些超过红线的地方,陈明忠表示,对一些取用水项目进行区域的限批," \
"严格地进行水资源论证和取水许可的批准。"
print("=" * 30 + "关键词提取" + "=" * 30)
print(HanLP.extractKeyword(document, 8))
print("-" * 70)
print("=" * 30 + "自动摘要" + "=" * 30)
print(HanLP.extractSummary(document, 3))
print("-" * 70)
text = r"算法工程师\n 算法(Algorithm)是一系列解决问题的清晰指令,也就是说,能够对一定规范的输入,在有限时间内获得所要求的输出。如果一个算法有缺陷,或不适合于某个问题,执行这个算法将不会解决这个问题。不同的算法可能用不同的时间、空间或效率来完成同样的任务。一个算法的优劣可以用空间复杂度与时间复杂度来衡量。算法工程师就是利用算法处理事物的人。\n \n 1职位简介\n 算法工程师是一个非常高端的职位;\n 专业要求:计算机、电子、通信、数学等相关专业;\n 学历要求:本科及其以上的学历,大多数是硕士学历及其以上;\n 语言要求:英语要求是熟练,基本上能阅读国外专业书刊;\n 必须掌握计算机相关知识,熟练使用仿真工具MATLAB等,必须会一门编程语言。\n\n2研究方向\n 视频算法工程师、图像处理算法工程师、音频算法工程师 通信基带算法工程师\n \n 3目前国内外状况\n 目前国内从事算法研究的工程师不少,但是高级算法工程师却很少,是一个非常紧缺的专业工程师。算法工程师根据研究领域来分主要有音频/视频算法处理、图像技术方面的二维信息算法处理和通信物理层、雷达信号处理、生物医学信号处理等领域的一维信息算法处理。\n 在计算机音视频和图形图像技术等二维信息算法处理方面目前比较先进的视频处理算法:机器视觉成为此类算法研究的核心;另外还有2D转3D算法(2D-to-3D conversion),去隔行算法(de-interlacing),运动估计运动补偿算法(Motion estimation/Motion Compensation),去噪算法(Noise Reduction),缩放算法(scaling),锐化处理算法(Sharpness),超分辨率算法(Super Resolution),手势识别(gesture recognition),人脸识别(face recognition)。\n 在通信物理层等一维信息领域目前常用的算法:无线领域的RRM、RTT,传送领域的调制解调、信道均衡、信号检测、网络优化、信号分解等。\n 另外数据挖掘、互联网搜索算法也成为当今的热门方向。\n"
print("=" * 30 + "短语提取" + "=" * 30)
print(HanLP.extractPhrase(text, 10))
print("-" * 70)
shutdownJVM()
| [
"[email protected]"
] | |
0b8a4d987be1c050c59b6bfd3e16eb59b6970292 | 343f954b367cf9a7e136c3cf748bdac6b5d40291 | /web/docker_addons/provider.py | ed2f64b993c8892a4306c51687891c5645b43ed8 | [] | no_license | TigerAppsOrg/TigerHost | e1e6ab69d0691af8fb7a7f22acae275b254b38f6 | df2bbc2c0f7b593930a5c5bc038232f66394f8c5 | refs/heads/master | 2023-02-07T10:21:55.134366 | 2016-05-25T01:59:57 | 2016-05-25T01:59:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,923 | py | import docker
from api_server.addons.providers.base_provider import BaseAddonProvider
from api_server.addons.providers.exceptions import AddonProviderError
from django.conf import settings
from docker_addons.docker_client import create_client
from docker_addons.models import ContainerInfo
class DockerAddonProvider(BaseAddonProvider):
def __init__(self, container_type, config_name):
"""Create a new Docker addon provider for
the specified container type.
:param docker_addons.containers.types.AddonTypes container_type: the addon type
:param str config_name: the name of the config to store in
"""
self.config_name = config_name
self.docker_client = create_client()
self.container_type = container_type
def _get_config_name(self, config_customization=None):
if config_customization is None:
return self.config_name
return config_customization + '_' + self.config_name
def begin_provision(self, app_id):
"""Kick off the provision process and return a UUID
for the new addon. This method MUST return immediately.
In the event of errors, raise any subclass of
:py:obj:`AddonProviderError <api_server.addons.providers.exceptions.AddonProviderError>`.
:param str app_id: the ID of the app that this addon will be for
:rtype: dict
:return: A dictionary with the following keys:\{
'message': 'the message to be displayed to the user',
'uuid': 'the unique ID for this addon. Must be a UUID object.',
}
:raises api_server.addons.providers.exceptions.AddonProviderError: If the resource cannot be allocated.
"""
instance = ContainerInfo.objects.create()
container = self.container_type.get_container(
container_info=instance,
docker_client=self.docker_client,
network_name=settings.DOCKER_NETWORK,
)
try:
container.run_container()
except (docker.errors.APIError, docker.errors.DockerException):
raise AddonProviderError('Addon cannot be allocated.')
return {
'message': 'Addon allocated. Please wait a while for it to become available. The URL will be stored at {} or {}.'.format(self.config_name, self._get_config_name('<CUSTOM_NAME>')),
'uuid': instance.uuid,
}
def provision_complete(self, uuid):
"""Check on the status of provision. This must return
immediately.
:param uuid.UUID uuid: The UUID returned from :py:meth:`begin_provision`
:rtype: tuple
:return: (bool, int) - The first value should be True if provision is
complete. The second value is an optional value to
tell the server how long (in seconds) to wait before
checking in again. Note that this is only looked at
if the first value is False
:raises api_server.addons.providers.exceptions.AddonProviderError: If provision failed.
"""
return True, 0
def get_config(self, uuid, config_customization=None):
"""Get the config necesary to allow the app to use this
addon's resources.
:param uuid.UUID uuid: The UUID returned from :py:meth:`begin_provision`
:param str config_customization: A string used to avoid conflict in config
variable names. This string should be incorporated into each of the config
variable names somehow, for example, <custom_name>_DATABASE_URL.
:rtype: dict
:return: A dictionary with the following keys:\{
'config':\{
'ENV_VAR1': ...
...
}
}
:raises api_server.addons.providers.exceptions.AddonProviderError:
If the config cannot be generated for some reason
(say, provision never started/failed).
"""
try:
instance = ContainerInfo.objects.get(uuid=uuid)
except ContainerInfo.DoesNotExist:
raise AddonProviderError(
'Addon with uuid {} does not exist.'.format(uuid))
container = self.container_type.get_container(
container_info=instance,
docker_client=self.docker_client,
network_name=settings.DOCKER_NETWORK,
)
return {
'config': {
self._get_config_name(config_customization=config_customization): container.get_url(),
}
}
def deprovision(self, uuid):
"""Kicks off the deprovision process. This should return right away.
:param uuid.UUID uuid: The UUID returned from :py:meth:`begin_provision`
:rtype: dict
:return: A dictionary with the following keys:\{
'message': 'The message to be displayed to the user.'
}
:raises api_server.addons.providers.exceptions.AddonProviderError:
If deprovision cannot start, or if it has already started.
"""
try:
instance = ContainerInfo.objects.get(uuid=uuid)
except ContainerInfo.DoesNotExist:
raise AddonProviderError(
'Addon with uuid {} does not exist.'.format(uuid))
container = self.container_type.get_container(
container_info=instance,
docker_client=self.docker_client,
network_name=settings.DOCKER_NETWORK,
)
try:
container.stop_container()
except (docker.errors.APIError, docker.errors.DockerException) as e:
raise AddonProviderError('{}'.format(e))
return {
            'message': 'Addon deleted. Please remove {config_name} or {custom_name} manually.'.format(
config_name=self.config_name,
custom_name=self._get_config_name('<CUSTOM_NAME>'))
}
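    # Minimal lifecycle sketch (not part of the original class; the addon type
    # and config name below are assumptions, not values defined in this repo):
    #
    #   provider = DockerAddonProvider(AddonTypes.postgres, 'DATABASE_URL')
    #   result = provider.begin_provision(app_id='my-app')
    #   while not provider.provision_complete(result['uuid'])[0]:
    #       pass  # wait, then poll again
    #   config = provider.get_config(result['uuid'], config_customization='MAIN')['config']
    #   provider.deprovision(result['uuid'])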
| [
"[email protected]"
] | |
60a5a3b2d88f1b5cfc9138ecdb5aa12bafdb305b | b5b5786665bf242bef93883b3689f4bda192f916 | /minimum number of operation.py | 7b921a3df547ec96560cf627e68889b20ae4e415 | [] | no_license | Manojna52/data-structures-with-python | c9bbd44bbedeb3c4f2d6188e1e0a579ee22d15e9 | ae4df664a90ce9df83ca75354c15416d74f86da5 | refs/heads/master | 2023-03-19T06:02:58.028046 | 2021-03-20T04:28:40 | 2021-03-20T04:28:40 | 349,619,394 | 0 | 0 | null | 2021-03-20T04:28:41 | 2021-03-20T03:43:45 | Python | UTF-8 | Python | false | false | 176 | py | x=input()
x=int(x)
def target(x):
if(x==0):
return 0;
if(x%2==0):
return 1+target(x//2);
if(x%2!=0):
return 1+target(x-1);
print(target(x))
| [
"[email protected]"
] | |
151e27aa8434d20f0181f26549e74a8f679a726c | 119a40ccae50e41ee1b902f06785faa404e4f8e9 | /gen_zip.py | 6847973b4f6e821ff275c883d668defafff54276 | [] | no_license | randomdude999/smwc-preview | ffe8eda8da02b8b801c561c0acbac76f24e1faa2 | 69d760c7712d5e4e66afe7b3089ff8a6be4c36c3 | refs/heads/master | 2022-04-04T07:06:09.042427 | 2017-10-25T21:22:55 | 2017-10-25T21:22:55 | 108,280,176 | 1 | 2 | null | 2020-02-17T13:50:38 | 2017-10-25T14:16:50 | Python | UTF-8 | Python | false | false | 1,843 | py | import subprocess
import zipfile
import fnmatch
import os
# needed for building the extension
chrome_exe = os.environ["CHROME_EXE"]
exclude_from_zip = [
".idea",
"settings.json",
"gen_zip.py",
".gitignore",
"chrome_ext.pem",
"install_mode",
"smwc_preview.zip",
os.path.join("*", "error.log"),
os.path.join(".", "native_messaging_host", "smwc_preview.json"),
".git",
os.path.join("*", "__pycache__"),
"*.py[co]",
"README_user.txt",
"README.md",
os.path.join(".", "uri_handler", "uri_format.txt")
]
if os.path.exists("chrome_ext.pem"):
result = subprocess.run([chrome_exe,
"--pack-extension=" + os.path.abspath("chrome_ext"),
"--pack-extension-key=" + os.path.abspath("chrome_ext.pem")])
if result.returncode != 0:
print(f"chrome exited with error code {result.returncode}")
with zipfile.ZipFile('smwc_preview.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
for root, dirs, files in os.walk("."):
        # iterate over a copy while pruning the original list in place: os.walk
        # only skips a directory when it is removed from `dirs` itself, and
        # mutating the list you are iterating over directly causes subtle bugs
        for x in dirs.copy():
root_relative_path = os.path.join(root if root != '.' else '', x)
for pattern in exclude_from_zip:
if fnmatch.fnmatch(root_relative_path, pattern):
dirs.remove(x)
break
for x in files:
root_relative_path = os.path.join(root if root != '.' else '', x)
include = True
for pattern in exclude_from_zip:
if fnmatch.fnmatch(root_relative_path, pattern):
include = False
break
if include:
zipf.write(root_relative_path)
zipf.write("README_user.txt", "README.txt")
| [
"[email protected]"
] | |
023ece318ab75778e99245e1d84e28a375bf3258 | 354ebc252c46fa844e1ff93ac554d3a0e1535ddd | /ex30.py | 465f243b9a42954578e1282df641779202318d59 | [] | no_license | yuliqing16/python_learn | de568e23e57141fe26b6db5e8c9a00899676f232 | ba7daa7fda08ea99ea3c1b30be9d18afd07deec8 | refs/heads/master | 2018-11-05T18:46:20.534032 | 2018-08-27T06:25:17 | 2018-08-27T06:25:17 | 114,659,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | people = 30
cars = 40
buses = 15
if cars > people:
print("We should take the cars.")
elif cars < people:
print("We should not take the cars.")
else:
print("We still can't decide.") | [
"[email protected]"
] | |
98d3c9a40cc074f8a5e5e4e336dccf81f7542b34 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_koshers.py | 7df1793076be59aa259e7fa5dbfcd802dd19301a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py |
from xai.brain.wordbase.adjectives._kosher import _KOSHER
#calss header
class _KOSHERS(_KOSHER, ):
def __init__(self,):
_KOSHER.__init__(self)
self.name = "KOSHERS"
self.specie = 'adjectives'
self.basic = "kosher"
self.jsondata = {}
| [
"[email protected]"
] | |
b05a9b35d1dc2bc4a1f6b1502a4d2517cc98fd12 | cb6b6ee58dbee4604058a3e44142d0ffb7906ba8 | /pypif/obj/common/reference.py | d73228acef181e4c1721f6f402341b3e3a654819 | [] | no_license | nad2000/pypif | a3991dab9bf3e0c558a2891b301c3dec494096a6 | dc9923792f91c53ac649b403620a387e1d86cb83 | refs/heads/master | 2020-04-07T07:43:57.980477 | 2016-01-13T21:54:58 | 2016-01-13T21:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,256 | py | from pypif.obj.common.name import Name
from pypif.obj.common.pages import Pages
from pypif.obj.common.pio import Pio
class Reference(Pio):
"""
Information about a referenced publication.
"""
def __init__(self, doi=None, isbn=None, issn=None, url=None, title=None, publisher=None, journal=None, volume=None,
issue=None, year=None, pages=None, authors=None, editors=None, references=None, **kwargs):
"""
Constructor.
:param doi: String with DOI of the published work
:param isbn: String with ISBN of the published work
:param issn: String with ISSN of the published work
:param url: String with URL to the published work
:param title: String with title of the published work.
:param publisher: String with publisher of the work.
:param journal: String with the journal in which the work was published.
:param volume: String with the volume in which the work was published.
:param issue: String with the issue in which the work was published.
:param year: String with the year in which the work was published.
:param pages: :class:`.Pages` object with the starting and ending pages for the published work.
:param authors: List of :class:`.Name` objects with information about the authors.
:param editors: List of :class:`.Name` objects with information about the editors.
:param references: List of :class:`.Reference` objects with works cited by this published work.
:param kwargs: Dictionary of field names not supported.
"""
super(Reference, self).__init__(**kwargs)
# These members have explicit setters and getters
self._pages = None
self._authors = None
self._editors = None
self._references = None
# Set the values for this object
self.doi = doi
self.isbn = isbn
self.issn = issn
self.url = url
self.title = title
self.publisher = publisher
self.journal = journal
self.volume = volume
self.issue = issue
self.year = year
self.pages = pages
self.authors = authors
self.editors = editors
self.references = references
@property
def pages(self):
return self._pages
@pages.setter
def pages(self, value):
self._pages = self._get_object(Pages, value)
@pages.deleter
def pages(self):
del self._pages
@property
def authors(self):
return self._authors
@authors.setter
def authors(self, value):
self._authors = self._get_object(Name, value)
@authors.deleter
def authors(self):
del self._authors
@property
def editors(self):
return self._editors
@editors.setter
def editors(self, value):
self._editors = self._get_object(Name, value)
@editors.deleter
def editors(self):
del self._editors
@property
def references(self):
return self._references
@references.setter
def references(self, value):
self._references = self._get_object(Reference, value)
@references.deleter
def references(self):
del self._references
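# Minimal usage sketch (not part of the original module; the bibliographic
# values are made up, and the Name/Pages keyword names follow the pypif objects
# imported above):
#
#   ref = Reference(doi='10.0000/example', title='An Example Study',
#                   journal='Journal of Examples', year='2016',
#                   pages=Pages(start='1', end='10'),
#                   authors=[Name(given='Ada', family='Lovelace')])
#   print(ref.pages.start, [a.family for a in ref.authors])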
| [
"[email protected]"
] | |
e365c6caa96ff93ca3ae52f83d2dad7fe420db8a | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/batch/models/node_agent_sku_paged.py | 421f4ce3570b08e498a112d19f4b911761114146 | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 1,386 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class NodeAgentSkuPaged(Paged):
"""
A paging container for iterating over a list of NodeAgentSku object
"""
_attribute_map = {
'next_link': {'key': 'odata\\.nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[NodeAgentSku]'}
}
def __init__(self, *args, **kwargs):
super(NodeAgentSkuPaged, self).__init__(*args, **kwargs)
| [
"[email protected]"
] |