Dataset schema (29 columns; each record below is one source file):

| column | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 3 to 281) |
| content_id | string (length 40) |
| detected_licenses | list (length 0 to 57) |
| license_type | string (2 classes) |
| repo_name | string (length 6 to 116) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (313 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (18.2k to 668M, nullable) |
| star_events_count | int64 (0 to 102k) |
| fork_events_count | int64 (0 to 38.2k) |
| gha_license_id | string (17 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (107 classes) |
| src_encoding | string (20 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (4 to 6.02M) |
| extension | string (78 classes) |
| content | string (length 2 to 6.02M) |
| authors | list (length 1) |
| author | string (length 0 to 175) |
92f0088358bab1fa58c2c52e016d253b12bfc28d
|
7246faf9a222269ce2612613f58dc5ff19091f10
|
/baekjoon/3000~5999/4948_베르트랑공준.py
|
f2adb647d3f69804cccea3dfb61db9c7a6ded31a
|
[] |
no_license
|
gusdn3477/Algorithm_Study
|
87a2eb72a8488d9263a86db70dadc7944434d41d
|
3fefe1dcb40122157845ffc542f41cb097711cc8
|
refs/heads/main
| 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 380 |
py
|
from math import sqrt

# Sieve of Eratosthenes over [0, 250000); arr[i] stays nonzero only for primes.
arr = [i for i in range(250000)]
arr[0] = 0
arr[1] = 0
for i in range(2, int(sqrt(250000)) + 1):
    for j in range(i + i, 250000, i):
        if arr[j] != 0:
            arr[j] = 0
# Count the primes p with N < p <= 2N for each query; an input of 0 ends the loop.
while True:
    N = int(input())
    ct = 0
    if N == 0:
        break
    for i in range(N + 1, N * 2 + 1):
        if arr[i] != 0:
            ct += 1
    print(ct)
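# Illustrative sanity check (not part of the original solution file): count the primes
# in (N, 2N] by plain trial division and compare with what the sieve-based loop prints.
def _count_primes_between(N):
    def is_prime(n):
        if n < 2:
            return False
        d = 2
        while d * d <= n:
            if n % d == 0:
                return False
            d += 1
        return True
    return sum(1 for p in range(N + 1, 2 * N + 1) if is_prime(p))

# Bertrand's postulate guarantees at least one prime in (N, 2N] for every N >= 1.
assert _count_primes_between(1) == 1   # the prime 2
assert _count_primes_between(10) == 4  # primes 11, 13, 17, 19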
|
[
"[email protected]"
] | |
05a8d2e1001f8b597c7d47ed5c8775417ea9301a
|
c254cdfc532242e9fca877e94f1a5df3ae7438fa
|
/wide_census.py
|
fb2f6e9cda1f119322125000ee0ceb0473515dea
|
[] |
no_license
|
fabriciojoc/tensorflow-widendeep
|
50498d1aca24c79c5efaedf70f3f2fd893d5990f
|
940bad1e1d2d56940d8bc5a1b76f42bdf6444044
|
refs/heads/master
| 2020-05-23T08:13:21.982075 | 2016-11-02T22:48:36 | 2016-11-02T22:48:36 | 70,288,048 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,177 |
py
|
# ref: www.tensorflow.org/versions/r0.11/tutorials/wide_and_deep/index.html
import tempfile
import urllib
import pandas as pd
import tensorflow as tf
import numpy as np
##
## 1 - READ DATA
##
# temporary files for train and test
train_file = tempfile.NamedTemporaryFile()
test_file = tempfile.NamedTemporaryFile()
# get train
urllib.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", train_file.name)
# get test
urllib.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", test_file.name)
# dataset columns
# the last one is the label
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
# read dataset
df_train = pd.read_csv(train_file, names=COLUMNS, skipinitialspace=True)
df_test = pd.read_csv(test_file, names=COLUMNS, skipinitialspace=True, skiprows=1)
# drop rows that contain NaN values
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)
##
## 2 - CONVERT DATA TO TENSORS
##
def input_fn(df):
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {}
for k in CONTINUOUS_COLUMNS:
continuous_cols[k] = tf.constant(df[k].values, name=k)
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {}
for k in CATEGORICAL_COLUMNS:
# indices = elements that have nonzero values
indices = []
for i in range(df[k].size):
indices.append([i,0])
categorical_cols[k] = tf.SparseTensor(indices=indices, values=df[k].values, shape=[df[k].size,1])
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols.items() + categorical_cols.items())
# Convert labels to integer
labels = []
uniq_labels = np.unique(df[LABEL_COLUMN].values)
for i in df[LABEL_COLUMN].values:
for j in range(len(uniq_labels)):
if i == uniq_labels[j]:
labels.append(j)
# Converts the label column into a constant Tensor.
label = tf.constant(labels)
# Returns the feature columns and the label.
return feature_cols, label
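# Illustration (not from the original tutorial code): for a small categorical column such as
# df["gender"] == ["Male", "Female", "Male"], the loop above produces
#     indices = [[0, 0], [1, 0], [2, 0]]
# so the resulting tensor is equivalent to
#     tf.SparseTensor(indices=[[0, 0], [1, 0], [2, 0]],
#                     values=["Male", "Female", "Male"],
#                     shape=[3, 1])
# i.e. a sparse column vector of strings with exactly one value per example.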
def train_input_fn():
return input_fn(df_train)
def test_input_fn():
return input_fn(df_test)
# label column
LABEL_COLUMN = "income_bracket"
# categorical columns
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
# continuous columns
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss", "hours_per_week"]
##
## 3 - MODEL FEATURES
##
# categorical feature columns
workclass = tf.contrib.layers.sparse_column_with_hash_bucket("workclass", hash_bucket_size=100)
education = tf.contrib.layers.sparse_column_with_hash_bucket("education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket("marital_status", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket("occupation", hash_bucket_size=1000)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket("relationship", hash_bucket_size=100)
race = tf.contrib.layers.sparse_column_with_keys(column_name="race", keys=[
"Amer-Indian-Eskimo", "Asian-Pac-Islander", "Black", "Other", "White"])
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
native_country = tf.contrib.layers.sparse_column_with_hash_bucket("native_country", hash_bucket_size=1000)
# continuous feature columns
age = tf.contrib.layers.real_valued_column("age")
# transform age to categorical
age_buckets = tf.contrib.layers.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
# crossed features columns
education_x_occupation = tf.contrib.layers.crossed_column([education, occupation], hash_bucket_size=int(1e4))
native_country_x_occupation = tf.contrib.layers.crossed_column([native_country, occupation], hash_bucket_size=int(1e4))
age_buckets_x_race_x_occupation = tf.contrib.layers.crossed_column(
[age_buckets, race, occupation], hash_bucket_size=int(1e6))
age_buckets_x_education_x_occupation = tf.contrib.layers.crossed_column(
[age_buckets, education, occupation], hash_bucket_size=int(1e6))
##
## 4 - WIDE COLUMNS
##
wide_columns = [gender, native_country, education, occupation, workclass,
relationship, age_buckets, education_x_occupation,
age_buckets_x_education_x_occupation, native_country_x_occupation]
##
## 5 - DEEP COLUMNS
##
# Each of the sparse, high-dimensional categorical features are first converted
# into a low-dimensional and dense real-valued vector, often referred to as an
# embedding vector
deep_columns = [ tf.contrib.layers.embedding_column(gender, dimension=8), tf.contrib.layers.embedding_column(native_country, dimension=8), tf.contrib.layers.embedding_column(education, dimension=8), tf.contrib.layers.embedding_column(occupation, dimension=8), tf.contrib.layers.embedding_column(workclass, dimension=8), tf.contrib.layers.embedding_column(relationship, dimension=8), age, education_num, capital_gain, capital_loss, hours_per_week ]
##
## 6 - MODEL CREATION
##
model_dir = tempfile.mkdtemp()
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
##
## 7 - MODEL TRAIN AND TEST
##
m.fit(input_fn=train_input_fn, steps=200)
results = m.evaluate(input_fn=test_input_fn, steps=1)
for key in sorted(results):
print "%s: %s" % (key, results[key])
|
[
"[email protected]"
] | |
441d38328a0e5a8f371a6a953e736b867d669e96
|
c2cd227edaea2a0d0c0c95fb683c81f9cdb9673f
|
/data/generator.py
|
27465e0d2e8c7cc11534f6eb9f45c9bbc3445151
|
[] |
no_license
|
qhuang18-97/CIS700_proj
|
a3460af6b00dd67380eac1b0e465d308b9108877
|
ca060cb0a1c17fa1c8bd23a3aeb8db7666692dba
|
refs/heads/master
| 2023-04-11T01:27:20.334205 | 2021-04-28T04:29:29 | 2021-04-28T04:29:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,531 |
py
|
import pickle as pk
import random

# range of random integer
up_bound = 100
low_bound = -100

# DP solution
def mss(arr):
    dp = []
    dp.append(arr[0])
    result = arr[0]
    for i in range(1, len(arr)):
        curr = max(dp[i - 1] + arr[i], arr[i])
        dp.append(curr)
        result = max(result, curr)
    return result

def input_generator(length):
    arr = []
    for i in range(length):
        arr.append(random.randint(low_bound, up_bound))
    return arr

def mss_generator(data_size, file, data_length):
    input_data = []
    output_data = []
    for i in range(data_size):
        arr = input_generator(data_length)
        input_data.append(arr)
        result = mss(arr)
        output_data.append(result)
    with open(file, "wb") as f:
        pk.dump((input_data, output_data), f)

# brute force solution
def check_sum(arr, out):
    maximum = arr[0]
    for i in range(len(arr)):
        curr = 0
        for j in range(i, len(arr)):
            curr += arr[j]
            if curr > maximum:
                maximum = curr
    if out == maximum:
        print(True)
    else:
        print(False)

def check(file):
    with open(file, "rb") as f:
        (x, y) = pk.load(f)
        length = len(x)
        for i in range(length):
            check_sum(x[i], y[i])

if __name__ == '__main__':
    train_file = "train.txt"
    test_file = "test.txt"
    test_file_var = "test_var.txt"
    mss_generator(10000, train_file, 10)
    mss_generator(1000, test_file, 10)
    mss_generator(1000, test_file_var, 20)
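# Worked example (illustrative, not in the original file): trace of the DP in mss()
# on arr = [2, -3, 4, -1, 2].
#   dp[0] = 2
#   dp[1] = max(2 + (-3), -3) = -1
#   dp[2] = max(-1 + 4, 4)    = 4
#   dp[3] = max(4 + (-1), -1) = 3
#   dp[4] = max(3 + 2, 2)     = 5
# The maximum subarray sum is the running maximum of dp, i.e. mss([2, -3, 4, -1, 2]) == 5,
# achieved by the subarray [4, -1, 2]; check_sum() confirms the same value by brute force.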
|
[
"[email protected]"
] | |
8e7f462f54a5e8c377193b2b99995a768d177c25
|
88162a141dc3361bf0574b29b41096e57623d8e9
|
/src/python/gjettelek/gjettelek.py
|
1a26234825a987cea8aaa2609ab7add34e5498be
|
[] |
no_license
|
kodeklubben/oppgaver
|
8ddc14297b33518d4532fe5c90170e981a9593a0
|
eb4b7d786563fc3c1d1b0324d00018198440aa1e
|
refs/heads/master
| 2023-09-05T21:09:37.505409 | 2023-08-01T06:08:56 | 2023-08-01T06:08:56 | 17,453,180 | 43 | 196 | null | 2023-08-23T11:27:25 | 2014-03-05T19:59:03 |
Python
|
UTF-8
|
Python
| false | false | 272 |
py
|
# gjettelek.py
from random import randint

number = randint(1, 100)
guess = 0
while guess != number:
    guess = int(input("Please guess a number: "))
    if (guess < number):
        print("Higher!")
    elif (guess > number):
        print("Lower!")
print("Correct!")
|
[
"[email protected]"
] | |
ba79eca0f24f1d37729244332dbcd676824f7146
|
a4643b30586a66e9c7c189020bdf8d8a656215b0
|
/template_day/template.py
|
de581a31ddd960751b69467895e491353e44ec40
|
[] |
no_license
|
Crinibus/adventofcode
|
db7bf6cb9098db388d2cc418ecc7df5e0f474323
|
1c81e388587fc7db3b59ec6ab6c38143880d90c9
|
refs/heads/master
| 2022-12-17T07:45:20.556732 | 2022-12-07T07:48:29 | 2022-12-07T07:48:29 | 225,465,739 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 622 |
py
|
import pathlib

def get_input() -> list[str]:
    root_path = pathlib.Path(__file__).parent.absolute()
    with open(f"{root_path}/input.txt", "r") as input_file:
        input_raw = input_file.readlines()
    return [line.strip() for line in input_raw]

def get_answer_part_1(input_data: list[str]):
    pass

def get_answer_part_2(input_data: list[str]):
    pass

def main():
    input_data = get_input()
    answer1 = get_answer_part_1(input_data)
    answer2 = get_answer_part_2(input_data)
    print(f"Part 1 answer: {answer1}")
    print(f"Part 2 answer: {answer2}")

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
2dbdd3de22cd35307bdf8d469a9124387145b253
|
2fc57e27bd74ca06a43f7ba6d192018cb282fc01
|
/functions_lab_1/start_point/tests/python_functions_test.py
|
6188ba6529c77b65131fa43d46a9947de6359d1c
|
[] |
no_license
|
pvaliani/codeclan_hw2
|
39d59b77a6378f75eeca9f5a88afd752cf7ab095
|
f5b1ef72120e6aa317b1c700b025a7f48e3cb459
|
refs/heads/master
| 2023-01-04T09:39:07.185893 | 2020-11-04T21:45:53 | 2020-11-04T21:45:53 | 310,092,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,987 |
py
|
from src.python_functions_practice import *
import unittest
class TestPythonFunctionPractice(unittest.TestCase):
def test_return_10(self):
return_10_result = return_10()
self.assertEqual( 10, return_10_result )
def test_add(self):
add_result = add( 1, 2 )
self.assertEqual( 3, add_result )
def test_subtract(self):
subtract_result = subtract( 10, 5 )
self.assertEqual( 5, subtract_result )
def test_multiply(self):
multiply_result = multiply( 4, 2 )
self.assertEqual( 8, multiply_result )
def test_divide(self):
divide_result = divide( 10, 2 )
self.assertEqual( 5, divide_result )
def test_length_of_string(self):
test_string = "A string of length 21"
string_length = length_of_string( test_string )
self.assertEqual( 21, string_length )
def test_join_string(self):
string_1 = "Mary had a little lamb, "
string_2 = "its fleece was white as snow"
joined_string = join_string( string_1, string_2 )
self.assertEqual( "Mary had a little lamb, its fleece was white as snow", joined_string )
def test_add_string_as_number(self):
add_result = add_string_as_number( "1", "2" )
self.assertEqual( 3, add_result )
def test_number_to_full_name__month_1(self):
result = number_to_full_month_name( 1 )
self.assertEqual( "January", result )
def test_number_to_full_name__month_3(self):
result = number_to_full_month_name( 3 )
self.assertEqual( "March", result )
def test_number_to_full_name__month_9(self):
result = number_to_full_month_name( 9 )
self.assertEqual( "September", result )
def test_number_to_short_month_name__month_1(self):
first_month_string = number_to_short_month_name( 1 )
self.assertEqual( "Jan", first_month_string )
def test_number_to_short_month_name__month_4(self):
fourth_month_string = number_to_short_month_name( 4 )
self.assertEqual( "Apr", fourth_month_string )
def test_number_to_short_month_name__month_10(self):
tenth_month_string = number_to_short_month_name( 10 )
self.assertEqual( "Oct", tenth_month_string )
#Further
#Given the length of a side of a cube calculate the volume
#@unittest.skip("delete this line to run the test")
def test_volume_of_cube(self):
volume_of_cube_num = volume_of_cube( 1 )
self.assertEqual( 1, volume_of_cube_num)
#Given a String, return the String reversed
#@unittest.skip("delete this line to run the test")
def test_reverse_string(self):
reverse_string_test = reverse_string( "i" )
self.assertEqual( "i", reverse_string_test)
#Given a value in fahrenheit, convert this into celsius.
#@unittest.skip("delete this line to run the test")
def test_fahrenheit_to_celsius(self):
fahrenheit_num = fahrenheit_to_celsius( 50 )
self.assertEqual( 10, fahrenheit_num)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
b0d93276c1287ebbdf2b60f2d961861d6bf463aa
|
8218ac4b1ad2cf0ac55d7eda19b2e466ad078402
|
/venv/lib/python3.7/site-packages/pyecharts/datasets/__init__.py
|
c6e0dd90d949034fa97239685245dc73bb61e91d
|
[] |
no_license
|
william-xiangzi/NetworkTest
|
07044c58976aa0d3d6325f81d3b17d51e5e9bc54
|
89500dabd09b64407056c8a45997cfdea2b14a41
|
refs/heads/master
| 2020-07-04T01:53:57.757693 | 2019-08-13T09:52:48 | 2019-08-13T09:52:48 | 202,114,781 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,150 |
py
|
# coding=utf-8
import json
import os
import urllib.request
from ..commons.types import Optional
__HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(__HERE, "map_filename.json"), "r", encoding="utf8") as f:
FILENAMES: dict = json.load(f)
with open(os.path.join(__HERE, "city_coordinates.json"), "r", encoding="utf8") as f:
COORDINATES: dict = json.load(f)
EXTRA = {}
def register_url(asset_url: Optional[str]):
if asset_url:
registry = asset_url + "/registry.json"
try:
contents = urllib.request.urlopen(registry).read()
contents = json.loads(contents)
except Exception as e:
raise e
files = {}
for name, pinyin in contents["PINYIN_MAP"].items():
file_name = contents["FILE_MAP"][pinyin]
files[name] = [file_name, "js"]
EXTRA[contents["GITHUB_URL"] + "/"] = files
def register_files(asset_files: Optional[dict]):
if asset_files:
FILENAMES.update(asset_files)
def register_coords(coords: dict):
if coords:
COORDINATES.update(coords)
|
[
"[email protected]"
] | |
64801be0735e6c4264e2fcac275da94b245371ca
|
2ed6ad4a736879a47d192159da45ca56610c089a
|
/tests/test_db.py
|
22393c254cb71d6912d534a4a6399d1eabd15537
|
[
"MIT"
] |
permissive
|
poonyisaTH/gsheets-db-api
|
a82bd35984766697757cc96aa74a1281d948f019
|
f023b32986d4da9a501fca8d435f2b6edc153353
|
refs/heads/master
| 2023-05-29T15:01:10.604324 | 2021-02-17T20:59:41 | 2021-02-17T20:59:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,045 |
py
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import unittest
import requests_mock
from .context import (
apply_parameters,
Connection,
connect,
exceptions,
)
class DBTestSuite(unittest.TestCase):
header_payload = {
'table': {
'cols': [
{'id': 'A', 'label': 'country', 'type': 'string'},
{
'id': 'B',
'label': 'cnt',
'type': 'number',
'pattern': 'General',
},
],
},
}
query_payload = {
'status': 'ok',
'table': {
'cols': [
{'id': 'A', 'label': 'country', 'type': 'string'},
{
'id': 'B',
'label': 'cnt',
'type': 'number',
'pattern': 'General',
},
],
'rows': [
{'c': [{'v': 'BR'}, {'v': 1.0, 'f': '1'}]},
{'c': [{'v': 'IN'}, {'v': 2.0, 'f': '2'}]},
],
},
}
def test_connection(self):
conn = connect()
self.assertFalse(conn.closed)
self.assertEqual(conn.cursors, [])
def test_check_closed(self):
conn = connect()
conn.close()
with self.assertRaises(exceptions.Error):
conn.close()
def test_close_cursors(self):
conn = connect()
cursor1 = conn.cursor()
cursor2 = conn.cursor()
cursor2.close()
conn.close()
self.assertTrue(cursor1.closed)
self.assertTrue(cursor2.closed)
def test_commit(self):
conn = connect()
conn.commit() # no-op
@requests_mock.Mocker()
def test_connection_execute(self, m):
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%200',
json=self.header_payload,
)
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A',
json=self.query_payload,
)
with Connection() as conn:
result = conn.execute(
'SELECT * FROM "http://docs.google.com/"').fetchall()
Row = namedtuple('Row', 'country cnt')
expected = [Row(country=u'BR', cnt=1.0), Row(country=u'IN', cnt=2.0)]
self.assertEqual(result, expected)
@requests_mock.Mocker()
def test_cursor_execute(self, m):
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%200',
json=self.header_payload,
)
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A',
json=self.query_payload,
)
with Connection() as conn:
cursor = conn.cursor()
result = cursor.execute(
'SELECT * FROM "http://docs.google.com/"').fetchall()
Row = namedtuple('Row', 'country cnt')
expected = [Row(country=u'BR', cnt=1.0), Row(country=u'IN', cnt=2.0)]
self.assertEqual(result, expected)
def test_cursor_executemany(self):
conn = Connection()
cursor = conn.cursor()
with self.assertRaises(exceptions.NotSupportedError):
cursor.executemany('SELECT * FROM "http://docs.google.com/"')
@requests_mock.Mocker()
def test_cursor(self, m):
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%200',
json=self.header_payload,
)
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A',
json=self.query_payload,
)
conn = Connection()
cursor = conn.cursor()
cursor.setinputsizes(0) # no-op
cursor.setoutputsizes(0) # no-op
@requests_mock.Mocker()
def test_cursor_rowcount(self, m):
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%200',
json=self.header_payload,
)
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A',
json=self.query_payload,
)
conn = Connection()
cursor = conn.cursor()
with self.assertRaises(exceptions.Error):
cursor.rowcount()
cursor.execute('SELECT * FROM "http://docs.google.com/"')
self.assertEqual(cursor.rowcount, 2)
@requests_mock.Mocker()
def test_cursor_fetchone(self, m):
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%200',
json=self.header_payload,
)
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A',
json=self.query_payload,
)
conn = Connection()
cursor = conn.cursor()
cursor.execute('SELECT * FROM "http://docs.google.com/"')
Row = namedtuple('Row', 'country cnt')
self.assertEqual(cursor.fetchone(), Row(country=u'BR', cnt=1.0))
self.assertEqual(cursor.fetchone(), Row(country=u'IN', cnt=2.0))
self.assertIsNone(cursor.fetchone())
@requests_mock.Mocker()
def test_cursor_fetchall(self, m):
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%200',
json=self.header_payload,
)
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A',
json=self.query_payload,
)
conn = Connection()
cursor = conn.cursor()
cursor.execute('SELECT * FROM "http://docs.google.com/"')
Row = namedtuple('Row', 'country cnt')
self.assertEqual(cursor.fetchone(), Row(country=u'BR', cnt=1.0))
self.assertEqual(cursor.fetchall(), [Row(country=u'IN', cnt=2.0)])
@requests_mock.Mocker()
def test_cursor_fetchmany(self, m):
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%200',
json=self.header_payload,
)
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A',
json=self.query_payload,
)
conn = Connection()
cursor = conn.cursor()
cursor.execute('SELECT * FROM "http://docs.google.com/"')
Row = namedtuple('Row', 'country cnt')
self.assertEqual(cursor.fetchmany(1), [Row(country=u'BR', cnt=1.0)])
self.assertEqual(cursor.fetchmany(10), [Row(country=u'IN', cnt=2.0)])
self.assertEqual(cursor.fetchmany(100), [])
@requests_mock.Mocker()
def test_cursor_iter(self, m):
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%200',
json=self.header_payload,
)
m.get(
'http://docs.google.com/gviz/tq?gid=0&tq=SELECT%20%2A',
json=self.query_payload,
)
conn = Connection()
cursor = conn.cursor()
cursor.execute('SELECT * FROM "http://docs.google.com/"')
Row = namedtuple('Row', 'country cnt')
self.assertEqual(
list(cursor),
[Row(country=u'BR', cnt=1.0), Row(country=u'IN', cnt=2.0)],
)
def test_apply_parameters(self):
query = 'SELECT * FROM table WHERE name=%(name)s'
parameters = {'name': 'Alice'}
result = apply_parameters(query, parameters)
expected = "SELECT * FROM table WHERE name='Alice'"
self.assertEqual(result, expected)
def test_apply_parameters_escape(self):
query = 'SELECT * FROM table WHERE name=%(name)s'
parameters = {'name': "O'Malley's"}
result = apply_parameters(query, parameters)
expected = "SELECT * FROM table WHERE name='O''Malley''s'"
self.assertEqual(result, expected)
def test_apply_parameters_float(self):
query = 'SELECT * FROM table WHERE age=%(age)s'
parameters = {'age': 50}
result = apply_parameters(query, parameters)
expected = "SELECT * FROM table WHERE age=50"
self.assertEqual(result, expected)
def test_apply_parameters_bool(self):
query = 'SELECT * FROM table WHERE active=%(active)s'
parameters = {'active': True}
result = apply_parameters(query, parameters)
expected = "SELECT * FROM table WHERE active=TRUE"
self.assertEqual(result, expected)
def test_apply_parameters_list(self):
query = (
'SELECT * FROM table '
'WHERE id IN %(allowed)s '
'AND id NOT IN %(prohibited)s'
)
parameters = {'allowed': [1, 2], 'prohibited': (2, 3)}
result = apply_parameters(query, parameters)
expected = (
'SELECT * FROM table '
'WHERE id IN (1, 2) '
'AND id NOT IN (2, 3)'
)
self.assertEqual(result, expected)
def test_apply_parameters_star(self):
query = 'SELECT %(column)s FROM table'
parameters = {'column': '*'}
result = apply_parameters(query, parameters)
expected = "SELECT * FROM table"
self.assertEqual(result, expected)
|
[
"[email protected]"
] | |
fbd540a9a8a2dc77e250b42930f27847e6734bb8
|
53015e1d44805dc884b282583608ad5a03dcc8a0
|
/P25.py
|
a90e2715fab96b790dcef6f1b2647bc295e22732
|
[] |
no_license
|
mitali-1703/Python-Lab-Work
|
0db24ed5d663f8b0ad09867594ad86d9c30b9b0d
|
30438481fd46fcfac93f06dd6cda2b961914f881
|
refs/heads/master
| 2023-04-22T01:57:51.526041 | 2021-05-13T18:38:14 | 2021-05-13T18:38:14 | 295,008,264 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 367 |
py
|
#Write a function calculation() which accepts 2 variables and calculates their sum and
# difference in a single return call.
def calculation(x,y):
    sum=x+y
    diff=x-y
    return(sum,diff)

a=int(input("Enter first number:"))
b=int(input("Enter second number:"))
s,d=calculation(a,b)
print("The sum and difference of the numbers respectively is:",s,d)
|
[
"[email protected]"
] | |
37d23ae628c21b76f4715d973d1d08d02af4b6ca
|
7ab15522084e2f81d39cda505da844fb4d519f9d
|
/Linear DS/Hard/Array Manipulation/array_manipulation.py
|
965441f349f2bce6c1db189177727e984daceb2b
|
[] |
no_license
|
Infinite-Loop-KJSIEIT/Algo-Talks
|
1662cfd802bfbe4a9bfcf80a9c3157334e5cb4fd
|
27d85ae3827f8765a4ebe98c80cc55b53c0562b0
|
refs/heads/master
| 2022-12-25T21:53:57.745115 | 2020-10-03T07:07:02 | 2020-10-03T07:07:02 | 286,681,402 | 13 | 3 | null | 2020-10-03T07:07:04 | 2020-08-11T07:53:23 |
Python
|
UTF-8
|
Python
| false | false | 419 |
py
|
import sys

def uno(): return int(sys.stdin.readline().strip())
def dos(): return sys.stdin.readline().strip()
def tres(): return map(int, sys.stdin.readline().strip().split())
def cuatro(): return sys.stdin.readline().strip().split()

n, m = tres()
ar, mx, sm = [0]*(n+1), 0, 0
for i in range(m):
    a, b, k = tres()
    ar[a-1] += k
    ar[b] -= k
for i in range(n+1):
    sm += ar[i]
    mx = max(mx, sm)
print(mx)
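# Worked example (illustrative, not part of the original submission): the difference-array
# trick above applies each range update (a, b, k) in O(1) and recovers the final values with
# one prefix-sum pass. For n = 5 and the updates (1, 3, 100) and (2, 5, 50):
#   after update 1: ar = [100, 0, 0, -100, 0, 0]    (add k at index a-1, subtract k at index b)
#   after update 2: ar = [100, 50, 0, -100, 0, -50]
#   prefix sums:          100, 150, 150, 50, 50, 0
# so the maximum value reached is 150, without touching every index for every update.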
|
[
"[email protected]"
] | |
ae57b70ed6eda0f92b39385332ced1e0fcf017ea
|
a98947b0dee48d5a014db44f31889ac6cf3a1107
|
/LD_GAMP_R/Test_LD_GAMP_R.py
|
cafeae2093acc0ee15c0b95649b1307984d078e3
|
[] |
no_license
|
PeiKaLunCi/LD-GAMP
|
bc00139b71ba092f63fe85c60b08101fa32063a8
|
455da27f00c752531cb4531dcb282f2d8cb1f9e2
|
refs/heads/master
| 2020-12-15T21:16:26.416264 | 2020-03-07T14:43:45 | 2020-03-07T14:43:45 | 245,648,986 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,546 |
py
|
import time
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
import LD_GAMP_R as LD_GAMP_R
import random
import h5py
#np.set_printoptions(threshold=1e10)
## Network Parameters
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
alg="GAMP"
tie_weights=False
height_img = 256
width_img = 256
channel_img = 1 # RGB -> 3, Grayscale -> 1
filter_height = 3
filter_width = 3
num_filters = 64
n_DnCNN_layers=16
n_GAMP_layers=10
TrainLoss='MSE'
## Training parameters (Selects which weights to use)
LayerbyLayer=True
DenoiserbyDenoiser=False#Overrides LayerbyLayer
if DenoiserbyDenoiser:
LayerbyLayer=float('nan') # neither True nor False; ignored when DenoiserbyDenoiser selects the weights
## Testing/Problem Parameters
BATCH_SIZE = 1#Using a batch size larger than 1 will hurt the denoiser by denoiser trained network because it will use an average noise level, rather than a noise level specific to each image
n_Test_Images = 5
sampling_rate_test=.25#The sampling rate used for testing
sampling_rate_train=.2#The sampling rate that was used for training
#sampling_rate_test = 1.
#sampling_rate_train = 1.
sigma_w=1./255. #Noise std # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#sigma_w = np.sqrt(10)
#sigma_w = 0.1
n=channel_img*height_img*width_img #
m=int(np.round(sampling_rate_test*n))
measurement_mode='gaussian'#'coded-diffraction'#'gaussian'#'complex-gaussian'#
# Parameters used to initialize weights. Won't be used if old weights are loaded
init_mu = 0
init_sigma = 0.1
#num_layers_in_ed_block = 8
#growth_rate = 2
n_bit = 4.0
#n_bit = 4.0
#n_bit = 4.0
#n_bit = 16.0
#n_bit = 12.0
random.seed(1)
LD_GAMP_R.SetNetworkParams(new_height_img=height_img, new_width_img=width_img, new_channel_img=channel_img, \
new_filter_height=filter_height, new_filter_width=filter_width, new_num_filters=num_filters, \
new_n_GAMP_layers=n_GAMP_layers,
new_sampling_rate=sampling_rate_test, \
new_BATCH_SIZE=BATCH_SIZE, new_sigma_w=sigma_w, new_n=n, new_m=m, new_training=False, use_adaptive_weights=DenoiserbyDenoiser)
LD_GAMP_R.ListNetworkParameters()
# tf Graph input
x_true = tf.placeholder(tf.float32, [n, BATCH_SIZE])
#Create handles for the measurement operator
[A_handle, At_handle, A_val, A_val_tf] = LD_GAMP_R.GenerateMeasurementOperators(measurement_mode)
## Initialize the variable theta which stores the weights and biases
if tie_weights == True:
theta = [None]
with tf.variable_scope("Iter" + str(0)):
theta_thisIter = LD_GAMP_R.init_vars_ResNet(init_mu, init_sigma)
theta[0] = theta_thisIter
elif DenoiserbyDenoiser:
noise_min_stds = [0, 10, 20, 40, 60, 80, 100, 150, 300]#This is currently hardcoded within LearnedDGAMP_ResNet_functionhelper
noise_max_stds = [10, 20, 40, 60, 80, 100, 150, 300, 500] # This is currently hardcoded within LearnedDGAMP_ResNet_functionhelper
theta = [None]*len(noise_min_stds)
for noise_level in range(len(noise_min_stds)):
with tf.variable_scope("Adaptive_NL"+str(noise_level)):
theta[noise_level]= LD_GAMP_R.init_vars_ResNet(init_mu, init_sigma)
else:
n_layers_trained = n_GAMP_layers
theta = [None] * n_layers_trained
for iter in range(n_layers_trained):
with tf.variable_scope("Iter" + str(iter)):
theta_thisIter = LD_GAMP_R.init_vars_ResNet(init_mu, init_sigma)
theta[iter] = theta_thisIter
## Construct model
z, z_w, noise_vec, quan_step, DeltaTh, Q_out, y_R, y_measured = LD_GAMP_R.GenerateNoisyCSData_handles(x_true, A_handle, sigma_w, A_val_tf, n_bit)
#quan_step, y_measured = LD_GAMP_R.GenerateNoisyCSData_handles_Ex(x_true, A_handle, sigma_w, A_val_tf, n_bit)
if alg == 'GAMP':
(mhat, s_list, mhat_list, vhat_list, V_list, Z_list, ztem_list, vtem_list, t_list, Sigma_list, R_list, layers_list) = LD_GAMP_R.LDGAMP_ResNet(y_measured,
A_handle,
At_handle,
A_val_tf, theta,
x_true, sigma_w,
quan_step=quan_step, n_bit=n_bit,
tie=tie_weights)
#(x_hat, MSE_history, NMSE_history, PSNR_history, r, rvar, dxdr) = LD_GAMP_R.LDGAMP_ResNet(y_measured, A_handle, At_handle, A_val_tf, theta, x_true, tie=tie_weights)
elif alg == 'DIT':
(x_hat, MSE_history, NMSE_history, PSNR_history) = LD_GAMP_R.LDIT(y_measured, A_handle, At_handle, A_val_tf, theta, x_true, tie=tie_weights)
else:
raise ValueError('alg was not a supported option')
## Load and Preprocess Test Data
if height_img>50:
test_im_name = "../TrainingData/StandardTestData_" + str(height_img) + "Res.npy"
else:
test_im_name = "../TrainingData/ValidationData_patch" + str(height_img) + ".npy"
test_images = np.load(test_im_name)
test_images=test_images[:,0,:,:]
assert (len(test_images)>=n_Test_Images), "Requested too much Test data"
x_test = np.transpose( np.reshape(test_images, (-1, height_img * width_img * channel_img)))
# with tf.Session() as sess:
# y_test=sess.run(y_measured,feed_dict={x_true: x_test, A_val_tf: A_val})#All the batches will use the same measurement matrix
## Test the Model
saver = tf.train.Saver() # defaults to saving all variables
saver_dict={}
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config = config) as sess:
#with tf.Session() as sess:
if tie_weights == 1: # Load weights from pretrained denoiser
save_name = LD_GAMP_R.GenResNetFilename(80. / 255.) + ".ckpt"
for l in range(0, n_ResNet_layers):
saver_dict.update({"l" + str(l) + "/w": theta[0][0][l]})#, "l" + str(l) + "/b": theta[0][1][l]})
for l in range(1, n_ResNet_layers - 1): # Associate variance, means, and beta
gamma_name = "Iter" + str(0) + "/l" + str(l) + "/BN/gamma:0"
beta_name = "Iter" + str(0) + "/l" + str(l) + "/BN/beta:0"
var_name = "Iter" + str(0) + "/l" + str(l) + "/BN/moving_variance:0"
mean_name = "Iter" + str(0) + "/l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
saver_dict.update({"l" + str(l) + "/BN/gamma": gamma})
saver_dict.update({"l" + str(l) + "/BN/beta": beta})
saver_dict.update({"l" + str(l) + "/BN/moving_variance": moving_variance})
saver_dict.update({"l" + str(l) + "/BN/moving_mean": moving_mean})
saver_initvars = tf.train.Saver(saver_dict)
saver_initvars.restore(sess, save_name)
elif DenoiserbyDenoiser:
for noise_level in range(len(noise_min_stds)):
noise_min_std=noise_min_stds[noise_level]
noise_max_std = noise_max_stds[noise_level]
save_name = LD_GAMP_R.GenResNetFilename(noise_min_std/ 255.,noise_max_std/255.) + ".ckpt"
for l in range(0, n_ResNet_layers):
saver_dict.update({"l" + str(l) + "/w": theta[noise_level][0][l]})#, "l" + str(l) + "/b": theta[noise_level][1][l]})
for l in range(1, n_ResNet_layers - 1): # Associate variance, means, and beta
gamma_name = "Adaptive_NL"+str(noise_level) + "/l" + str(l) + "/BN/gamma:0"
beta_name = "Adaptive_NL"+str(noise_level) + "/l" + str(l) + "/BN/beta:0"
var_name = "Adaptive_NL"+str(noise_level) + "/l" + str(l) + "/BN/moving_variance:0"
mean_name = "Adaptive_NL"+str(noise_level) + "/l" + str(l) + "/BN/moving_mean:0"
gamma = [v for v in tf.global_variables() if v.name == gamma_name][0]
beta = [v for v in tf.global_variables() if v.name == beta_name][0]
moving_variance = [v for v in tf.global_variables() if v.name == var_name][0]
moving_mean = [v for v in tf.global_variables() if v.name == mean_name][0]
saver_dict.update({"l" + str(l) + "/BN/gamma": gamma})
saver_dict.update({"l" + str(l) + "/BN/beta": beta})
saver_dict.update({"l" + str(l) + "/BN/moving_variance": moving_variance})
saver_dict.update({"l" + str(l) + "/BN/moving_mean": moving_mean})
saver_initvars = tf.train.Saver(saver_dict)
saver_initvars.restore(sess, save_name)
else:
print('Restore !!!')
#save_name = LD_GAMP_R.GenLDGAMP_ResNetFilename(alg, tie_weights, LayerbyLayer) + ".ckpt"
save_name = LD_GAMP_R.GenLDGAMP_ResNetFilename(alg, tie_weights, LayerbyLayer,sampling_rate_override=sampling_rate_train,loss_func=TrainLoss) + ".ckpt"
saver.restore(sess, save_name)
print("Reconstructing Signal")
start_time = time.time()
#Final_PSNRs=[]
"""
for offset in range(0, n_Test_Images - BATCH_SIZE + 1, BATCH_SIZE): # Subtract batch size-1 to avoid errors when len(train_images) is not a multiple of the batch size
end = offset + BATCH_SIZE
# batch_y_test = y_test[:, offset:end] #To be used when using precomputed measurements
# Generate a new measurement matrix
A_val = LD_GAMP_R.GenerateMeasurementMatrix(measurement_mode)
#A_val = LD_GAMP_R.GenerateMeasurementMatrix_Ex(measurement_mode)
batch_x_test = x_test[:, offset:end]
# Run optimization. This will both generate compressive measurements and then reconstruct from them.
#batch_x_recon, batch_MSE_hist, batch_NMSE_hist, batch_PSNR_hist = sess.run([x_hat, MSE_history, NMSE_history, PSNR_history], feed_dict={x_true: batch_x_test, A_val_tf: A_val})
batch_x_recon = sess.run(mhat, feed_dict={x_true: batch_x_test, A_val_tf: A_val})
"""
#Final_PSNRs.append(batch_PSNR_hist[-1][0])
#print(Final_PSNRs)
#print(np.mean(Final_PSNRs))
A_val = LD_GAMP_R.GenerateMeasurementMatrix(measurement_mode)
batch_x_test = x_test[:, n_Test_Images - 1]
batch_x_test = np.reshape(batch_x_test, newshape=(batch_x_test.shape[0], 1))
batch_x_recon = sess.run(mhat, feed_dict={x_true: batch_x_test, A_val_tf: A_val})
fig1 = plt.figure()
plt.imshow(np.transpose(np.reshape(x_test[:, n_Test_Images-1], (height_img, width_img))), interpolation='nearest', cmap='gray')
plt.show()
#plt.imsave('./first_.png', np.transpose(np.reshape(x_test[:, n_Test_Images-1], (height_img, width_img))))
fig2 = plt.figure()
plt.imshow(np.transpose(np.reshape(batch_x_recon[:, 0], (height_img, width_img))), interpolation='nearest', cmap='gray')
plt.show()
#plt.imsave('./second_.png', np.transpose(np.reshape(batch_x_recon[:, 0], (height_img, width_img))))
#fig3 = plt.figure()
#plt.plot(range(n_GAMP_layers+1), np.mean(batch_PSNR_hist,axis=1))
#plt.title("PSNR over " +str(alg)+" layers")
#plt.show()
print(x_test.shape)
print(batch_x_recon.shape)
x1 = x_test[:, n_Test_Images - 1]
x2 = batch_x_recon[:, 0]
#x_loss = np.sqrt(np.sum(np.square(x1 - x2)))
x_loss = np.sqrt(np.mean(np.square(x1 - x2)))
x_loss1 = np.sqrt(np.sum(np.square(x1 - x2)))
print(x1)
print(x2)
print(x_loss)
print(x_loss1)
MSE = np.mean(np.square(x1 - x2))
psnr = -10 * np.log(MSE) / np.log(10)
print(psnr)
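# Note added for clarity (not in the original script): the lines above compute PSNR in dB as
# -10*log10(MSE), which assumes pixel values in [0, 1] (peak signal = 1). For example, an MSE
# of 0.001 gives -10*log10(0.001) = 30 dB, and halving the MSE adds about 3 dB.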
|
[
"[email protected]"
] | |
4558b73f4309e412016f5c1d22d3652908e71d01
|
c2c84c98f2247f2a9fe280e41f3a4dc74fd4de1a
|
/online/analyses.py
|
73a0d03dbb5a0da0b17ff4129ab1c019baf63cab
|
[
"MIT"
] |
permissive
|
mrware91/tmolv29
|
153ded42ee190287442330943a2a9c51d8e55243
|
823321f2505b684e9fd1de1c01f4e46997f1e307
|
refs/heads/main
| 2023-04-06T13:55:09.926010 | 2021-04-14T14:26:05 | 2021-04-14T14:26:05 | 347,172,169 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,299 |
py
|
# Contributors: Matt Ware
import numpy as np
class analyses:
def __init__(self, analysis, totalEvents,printMode='verbose'):
self.analysis = analysis
self.totalEvents = totalEvents
self.events = 0
self.printMode = printMode
self.data = {}
self.dataTypesFound = False
self.outTypes = {}
self.initialize()
def initialize(self):
self.events = 0
self.data = {}
for key in self.analysis:
self.outTypes[key] = None
self.analysis[key]['type'] = None
self.analysis[key]['size'] = None
self.data[key] = np.zeros(self.totalEvents)*np.nan
self.setdefault(self.analysis[key],
'function',
'%s: No analysis function provided. Defaulting to return raw data.'%key,
lambda x: x)
self.setdefault(self.analysis[key],
'analyzeEvery',
'%s: No modulo provided. Will analyze every shot.'%key,
1)
def update(self, detectors):
self.dataTypesFound = True
for key in self.analysis:
analyzeEvery = self.analysis[key]['analyzeEvery']
if not ( self.events%analyzeEvery == 0):
continue
function = self.analysis[key]['function']
detectorKey = self.analysis[key]['detectorKey']
shotData = detectors[detectorKey]['shotData']
if (shotData is None) & (self.analysis[key]['type'] is None):
self.dataTypesFound = False
continue
elif (shotData is None) & (self.analysis[key]['type'] is not None):
self.data[key][self.events,] = self.data[key][self.events,]*np.nan
continue
result = function(shotData)
if result is not None:
if self.analysis[key]['type'] is None:
self.analysis[key]['type'] = type(result)
self.analysis[key]['size'] = np.size(result)
dims = np.shape(result)
self.data[key] = np.zeros((self.totalEvents,*dims))*np.nan
self.data[key][self.events,] = result
if self.outTypes[key] is None:
self.outTypes[key] = {}
self.outTypes[key]['type'] = type(self.data[key][self.events,])
self.outTypes[key]['size'] = np.size( self.data[key][self.events,] )
elif (result is None) & (self.analysis[key]['type'] is None):
self.dataTypesFound = False
self.events += 1
if self.events >= self.totalEvents:
self.cprint('Read events exceeds total expected. Resetting event count.')
self.events = 0
def setdefault(self, adict, key, response, default):
try:
adict[key]
except KeyError as ke:
allowedErrorStr = '\'%s\'' % key
if allowedErrorStr == str(ke):
self.cprint(response)
adict[key] = default
else:
raise ke
# def cprint(self,aString):
# print(aString)
def cprint(self, aString):
if self.printMode in 'verbose':
print(aString)
elif self.printMode in 'quiet':
pass
else:
print('printMode is %s. Should be verbose or quiet. Defaulting to verbose.'%self.printMode)
self.printMode = 'verbose'
self.cprint(aString)
def H5out(self):
if self.dataTypesFound:
outDict = {}
for key in self.data:
try:
outDict[key] = np.copy(self.data[key][0,:])
except IndexError as ie:
if ('1-dimensional' in str(ie)):
# print(f'dimension of {key} is {self.data[key].shape}')
outDict[key] = np.copy(self.data[key][:])
else:
raise ie
return outDict
else:
return None
|
[
"[email protected]"
] | |
45cd907e8bb791d0ec58d64af91e6306665a8ab2
|
70b8b109d389037c31ca74ddbdabcb838a6026a5
|
/password_generator_app/views.py
|
79a3ff9c71d951811dc053d51060311a525e3bf4
|
[] |
no_license
|
arnidhar/django3-password-generator
|
25dc8906d08f2f10e5a3a2093f2d6331f1ed4096
|
ee9746c05e3cdaa029814e60b48a10adcd988353
|
refs/heads/main
| 2023-07-20T20:15:33.804435 | 2021-09-01T18:15:19 | 2021-09-01T18:15:19 | 402,163,096 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 991 |
py
|
from django.shortcuts import render
from django.http import HttpResponse
import random

# Create your views here.
#def home(request):
#    return HttpResponse('<h1> Hello! Welcome to the website! </h1>')

def home(request):
    return render(request, 'password_generator_app/home.html', {'password': ''} )

def about(request):
    return render(request, 'password_generator_app/about.html')

def password(request):
    characters = list('abcdefghijklmnopqrstuvwxyz')
    if request.GET.get('UpperCase'):
        characters.extend(list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'))
    if request.GET.get('Special'):
        characters.extend(list('!@#$%^&*)('))
    if request.GET.get('Numbers'):
        # digits only (no comma separators in the character pool)
        characters.extend(list('0123456789'))
    length = int(request.GET.get('Length', 12))
    thepassword = ''
    for x in range(length):
        thepassword += random.choice(characters)
    return render(request, 'password_generator_app/password.html', {'password': thepassword})
|
[
"[email protected]"
] | |
fd6c788ba6b8318466159be137309f8ff4ea1a29
|
9f109d4d4fa2eb4ecec2415a21e45945a35cd58a
|
/xshop/users/tests/test_models.py
|
81150f9ff1be611e68b2606f5f69d464e95e5b0d
|
[] |
no_license
|
denokenya/xshop-web
|
4be66a39272075b778ed7dd8de996fec90b5fab8
|
262665ec4c2cb91490b219a086b8994d6eceb805
|
refs/heads/master
| 2023-06-07T02:54:57.068430 | 2020-09-13T11:24:32 | 2020-09-13T11:24:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,009 |
py
|
from django.test import TestCase
from model_bakery import baker
from ..models import User
class UserTests(TestCase):
def setUp(self) -> None:
self.user = baker.make(
User,
mobile="01010092181",
name="Ahmed Loay Shahwan",
email="[email protected]",
)
self.user1 = baker.make(User, mobile="01010092182")
def test_get_short_name(self):
self.assertEqual(self.user.get_short_name(), "Ahmed")
def test_get_full_name(self):
self.assertEqual(self.user.get_full_name(), "Ahmed Loay Shahwan")
def test_str(self):
self.assertEqual(str(self.user), "01010092181")
def test_repr(self):
# user with name
self.assertEqual(
self.user.__repr__(),
f"<User {self.user.id}: {str(self.user)} - {self.user.name}>",
)
# user without name
self.assertEqual(
self.user1.__repr__(), f"<User {self.user1.id}: {str(self.user1)}>",
)
|
[
"[email protected]"
] | |
9b82ea80ac9c7574edc4782462994c25892b1184
|
201ea8fd49f7e029dcfdd951e307cac06c3a5ab0
|
/proj4/raw_sock.py
|
119ddd9b9c5f39e31412b660b3ed9683218cd114
|
[] |
no_license
|
dingkple/FCN-Projs
|
34311356c4d93a95278d250fc2c64bdb3dd90936
|
52ee5fc572b8a6861b735005593c8d43aa32fef1
|
refs/heads/master
| 2021-05-01T19:35:20.001653 | 2016-11-14T13:57:33 | 2016-11-14T13:57:33 | 29,543,758 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 24,070 |
py
|
#!/usr/bin/python
import socket
from struct import *
import commands
import random
import urlparse
import time
import sys
import binascii
BASE_SEQ = 0
CSEQ_NUM = 0
SSEQ_NUM = 0
CACK_NUM = 0
SACK_NUM = 0
SBASE_SEQ = 0
MSS = 1400
IP_ID = 0
NEED_PRINT = False
DATA_RCVD = {}
USER_DATA = ' '
LAST_RCVD_TIME = {}
SENT_PKT ={}
CWD = 1
random.seed()
SRC_MAC = ''
DST_MAC = ''
GATEWAYIP = ''
ALL_PACKS = []
SRC_PORT = random.randint(30000, 60000) # source port
def decodeIpHeader(packet):
mapRet = {}
mapRet["version"] = (int(ord(packet[0])) & 0xF0)>>4
mapRet["headerLen"] = (int(ord(packet[0])) & 0x0F)<<2
mapRet["serviceType"] = hex(int(ord(packet[1])))
mapRet["totalLen"] = (int(ord(packet[2])<<8))+(int(ord(packet[3])))
mapRet["identification"] = (int( ord(packet[4])>>8 )) + (int( ord(packet[5])))
mapRet["id"] = int(ord(packet[6]) & 0xE0)>>5
mapRet["fragOff"] = int(ord(packet[6]) & 0x1F)<<8 + int(ord(packet[7]))
mapRet["ttl"] = int(ord(packet[8]))
mapRet["protocol"] = int(ord(packet[9]))
mapRet["checkSum"] = int(ord(packet[10])<<8)+int(ord(packet[11]))
mapRet["srcaddr"] = "%d.%d.%d.%d" % (int(ord(packet[12])),int(ord(packet[13])),int(ord(packet[14])), int(ord(packet[15])))
mapRet["dstaddr"] = "%d.%d.%d.%d" % (int(ord(packet[16])),int(ord(packet[17])),int(ord(packet[18])), int(ord(packet[19])))
return mapRet
def decode_tcp_header(packet, mapRet):
mapRet['src_port'] = (int(ord(packet[0])<<8)) + (int(ord(packet[1])))
mapRet['dst_port'] = (int(ord(packet[2])<<8)) + (int(ord(packet[3])))
mapRet['seq_num'] = (long(ord(packet[4])<<24)) + (long(ord(packet[5])<<16))
mapRet['seq_num'] = mapRet.get('seq_num') + (long(ord(packet[6])<<8)) + (long(ord(packet[7])))
mapRet['ack_num'] = (long(ord(packet[8])<<24)) + (long(ord(packet[9])<<16))
mapRet['ack_num'] = mapRet.get('ack_num') + (long(ord(packet[10])<<8)) + (long(ord(packet[11])))
# mapRet['ack'] = mapRet.get('ack_num') - SEQ_NUM
mapRet['data_offset'] = (int(ord(packet[12])<<4))
mapRet['ns'] = (int(ord(packet[12]) & int('00000001', 2)))
mapRet['cwr'] = (int(ord(packet[13]) & int('10000000', 2)))>>7
mapRet['ece'] = (int(ord(packet[13]) & int('01000000', 2)))>>6
mapRet['urg'] = (int(ord(packet[13]) & int('00100000', 2)))>>5
mapRet['ack'] = (int(ord(packet[13]) & int('00010000', 2)))>>4
mapRet['psh'] = (int(ord(packet[13]) & int('00001000', 2)))>>3
mapRet['rst'] = (int(ord(packet[13]) & int('00000100', 2)))>>2
mapRet['syn'] = (int(ord(packet[13]) & int('00000010', 2)))>>1
mapRet['fin'] = (int(ord(packet[13]) & int('00000001', 2)))
mapRet['window_size'] = (int(ord(packet[14])<<8)) + (int(ord(packet[15])))
mapRet['checksum'] = (int(ord(packet[16])<<8)) + (int(ord(packet[17])))
mapRet['urg_pointer'] = (int(ord(packet[18])<<8)) + (int(ord(packet[19])))
return mapRet
# checksum functions needed for checksum calculation
def checksum(msg):
s = 0
# loop taking 2 characters at a time
# print len(msg)
for i in range(0, len(msg), 2):
# print i
w = ord(msg[i]) + (ord(msg[i+1]) << 8 )
s = s + w
s = (s>>16) + (s & 0xffff);
s = s + (s >> 16);
#complement and mask to 4 byte short
s = ~s & 0xffff
return s
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
def ip_header_checksum(msg):
s = 0
for i in range(0, len(msg), 2):
w = ord(msg[i]) + (ord(msg[i+1]) << 8)
s = carry_around_add(s, w)
return ~s & 0xffff
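# Worked example (illustrative, not from the original file): the Internet checksum used above
# is the one's complement of the one's-complement sum of 16-bit words, with each word built
# low byte first. For the four bytes '\x45\x00\x00\x30':
#   word 1 = 0x45 + (0x00 << 8) = 0x0045
#   word 2 = 0x00 + (0x30 << 8) = 0x3000
#   sum    = 0x3045 (no carry), so the checksum is ~0x3045 & 0xffff = 0xcfba.
# A correctly received header re-checksummed this way sums to 0, which is what
# recive_packets() below relies on when it skips packets with a nonzero ip_header_checksum().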
def get_local_mac_addr():
# ifconfig eth0 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'
ips = commands.getoutput("/sbin/ifconfig eth0 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'")
for ip in ips.split():
if ip[:3] != '127':
source_ip = ip
return ip
self.source_ip = ''
def get_default_gateway_linux():
"""Read the default gateway directly from /proc."""
with open("/proc/net/route") as fh:
for line in fh:
fields = line.strip().split()
if fields[1] != '00000000' or not int(fields[3], 16) & 2:
continue
return socket.inet_ntoa(pack("<L", int(fields[2], 16)))
def get_local_ip_addr():
ips = commands.getoutput("/sbin/ifconfig | grep -i \"inet\" | grep -iv \"inet6\" | " +
"awk {'print $2'} | sed -ne 's/addr\:/ /p'")
for ip in ips.split():
if ip[:3] != '127':
source_ip = ip
return ip
self.source_ip = ''
def construct_frame_header():
print 'construct_frame_header: ',
print SRC_MAC,
print DST_MAC
eth_hdr = pack("!6s6s2s", DST_MAC.replace(':', '').decode('hex'), SRC_MAC.replace(':','').decode('hex'), '\x08\x00')
# dst_hdr = pack("!6s6s2s", '\xff\xff\xff\xff\xff\xff', dstmac.replace(':', '').decode('hex'), '\x08\x00')
packet = eth_hdr
print unpack("!6s6s2s", packet)
return packet
def construct_frame_ip_header(source_ip, dest_ip, length):
global IP_ID
IP_ID += 1
frame_header = construct_frame_header()
ip_ihl = 5
ip_ver = 4
ip_tos = 0
#Id of this packet
ip_id = IP_ID
ip_frag_off = 0
ip_ttl = 255
ip_proto = socket.IPPROTO_TCP
#os will fill the following two field
ip_tot_len = 20 + length
ip_check = 0
ip_saddr = socket.inet_aton (source_ip)
ip_daddr = socket.inet_aton (dest_ip)
ip_ihl_ver = (ip_ver << 4) + ip_ihl
ip_header = pack('!BBHHHBBH4s4s' , ip_ihl_ver, ip_tos, ip_tot_len, ip_id, ip_frag_off,
ip_ttl, ip_proto, ip_check, ip_saddr, ip_daddr)
cs = checksum(ip_header)
ip_check = cs & 0xffff
ip_header = pack('!BBHHHBB' , ip_ihl_ver, ip_tos, ip_tot_len, ip_id, ip_frag_off,
ip_ttl, ip_proto)
ip_header += pack('H', ip_check) + pack('!4s4s', ip_saddr, ip_daddr)
return frame_header + ip_header
# def construct_ip_header(source_ip, dest_ip):
# ip_ihl = 5
# ip_ver = 4
# ip_tos = 0
# #Id of this packet
# ip_id = 54321
# ip_frag_off = 0
# ip_ttl = 255
# ip_proto = socket.IPPROTO_TCP
# #os will fill the following two field
# ip_tot_len = 0
# ip_check = 0
# ip_saddr = socket.inet_aton (source_ip)
# ip_daddr = socket.inet_aton (dest_ip)
# ip_ihl_ver = (ip_ver << 4) + ip_ihl
# ip_header = pack('!BBHHHBBH4s4s' , ip_ihl_ver, ip_tos, ip_tot_len, ip_id, ip_frag_off,
# ip_ttl, ip_proto, ip_check, ip_saddr, ip_daddr)
# return ip_header
def construct_packet(source_ip, dest_ip, user_data, seq, ack, ptype, withfin = 0):
global CSEQ_NUM
global SSEQ_NUM
print ptype,
print seq,
print ack
# tcp header fields
tcp_urg = 0
tcp_rst = 0
tcp_doff = 5 #4 bit field, size of tcp header, 5 * 4 = 20 bytes
tcp_dest = 80 # destination port
if ptype == 'syn':
tcp_seq = seq
CSEQ_NUM = tcp_seq
print 'cseq: ' + str(CSEQ_NUM)
tcp_ack_seq = ack
tcp_doff = 5 #4 bit field, size of tcp header, 5 * 4 = 20 bytes
#tcp flags
tcp_fin = 0
tcp_syn = 1
tcp_psh = 0
tcp_ack = 0
elif ptype == 'ack_syn':
print 'ack with seq: ' + str(seq) + " " + str(ack)
tcp_seq = seq
CSEQ_NUM = tcp_seq
tcp_ack_seq = ack
SSEQ_NUM = tcp_ack_seq
#tcp flags
tcp_fin = 0
tcp_syn = 0
tcp_psh = 0
tcp_ack = 1
elif ptype == 'ack':
print 'ack with seq: ' + str(seq) + " " + str(ack)
tcp_seq = seq
tcp_ack_seq = ack
SSEQ_NUM = tcp_ack_seq
#tcp flags
tcp_fin = 0
tcp_syn = 0
tcp_psh = 0
tcp_ack = 1
elif ptype == 'send':
print 'ack with seq: ' + str(seq) + " " + str(ack)
print user_data
tcp_seq = seq
tcp_ack_seq = ack
#tcp flags
tcp_fin = 0
tcp_syn = 0
tcp_psh = 1
tcp_ack = 1
elif ptype == 'fin':
print 'ack with seq: ' + str(seq) + " " + str(ack)
tcp_seq = seq
tcp_ack_seq = ack
#tcp flags
tcp_fin = 1
tcp_syn = 0
tcp_psh = 0
tcp_ack = 1
if withfin == 1:
tcp_fin = 1
tcp_window = socket.htons(200) # maximum allowed window size
tcp_check = 0
tcp_urg_ptr = 0
tcp_offset_res = (tcp_doff << 4) + 0
tcp_flags = tcp_fin + (tcp_syn << 1) + (tcp_rst << 2) + (tcp_psh <<3) + (tcp_ack << 4) + (tcp_urg << 5)
tcp_header = pack('!HHLLBBHHH', SRC_PORT, tcp_dest, tcp_seq, tcp_ack_seq, tcp_offset_res,
tcp_flags, tcp_window, tcp_check, tcp_urg_ptr)
source_address = socket.inet_aton( source_ip )
dest_address = socket.inet_aton(dest_ip)
placeholder = 0
protocol = socket.IPPROTO_TCP
tcp_length = len(tcp_header) + len(user_data)
psh = pack('!4s4sBBH', source_address, dest_address , placeholder , protocol , tcp_length)
psh = psh + tcp_header + user_data;
# print psh
if len(psh) % 2 != 0:
psh = psh + ' '
tcp_check = checksum(psh)
#print tcp_checksum
# make the tcp header again and fill the correct checksum - remember checksum is NOT in network byte order
tcp_header = pack('!HHLLBBH' , SRC_PORT, tcp_dest, tcp_seq, tcp_ack_seq, tcp_offset_res, tcp_flags, tcp_window)
tcp_header += pack('H' , tcp_check) + pack('!H' , tcp_urg_ptr)
ip_header = construct_frame_ip_header(source_ip, dest_ip, len(tcp_header) + len(user_data))
packet = ip_header + tcp_header + user_data
CSEQ_NUM += len(user_data)
SENT_PKT[CSEQ_NUM] = [time.time(), user_data, seq, ack]
return packet
def hand_shake(source_ip, dest_ip):
global SSEQ_NUM
global CSEQ_NUM
global BASE_SEQ
global IP_ID
IP_ID = random.randint(10000, 30000)
try:
# s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
s.settimeout(180)
except socket.error, msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
CSEQ_NUM = random.randint(0, 65536)
BASE_SEQ = CSEQ_NUM
packet = construct_packet(source_ip, dest_ip, '', CSEQ_NUM, 0, 'syn')
# s.sendto(packet, (dest_ip , 0)) # put this in a loop if you want to flood the target
s.bind(('eth0', 0))
s.send(packet)
# send_packet(source_ip, dest_ip, packet)
response = recive_packets(source_ip, dest_ip)
CACK_NUM = response.get('ack_num')
if (response.has_key('syn') and response.get('syn') == 1 and
response.has_key('ack') and response.get('ack') == 1):
SSEQ_NUM = response.get('seq_num')
print 'acking with CSEQ: ' + str(CSEQ_NUM)
CSEQ_NUM += 1
SSEQ_NUM += 1
packet = construct_packet(source_ip, dest_ip, '', CSEQ_NUM, SSEQ_NUM, 'ack_syn')
# s.sendto(packet, (dest_ip , 0)) # put this in a loop if you want to flood the target
s.bind(('eth0', 0))
s.send(packet)
# send_packet(source_ip, dest_ip, packet)
# response = recive_packets(source_ip, dest_ip)
return True
def get_user_data():
global USER_DATA
global CSEQ_NUM
cur_seq = CSEQ_NUM - BASE_SEQ
available_size = min([CWD * MSS - CACK_NUM, len(USER_DATA) - cur_seq])
print 'current seq: ' + str(cur_seq)
print 'fetching data for packet with CSEQ: ' + str(cur_seq)
print len(USER_DATA),
print available_size,
print cur_seq
current_data = USER_DATA[cur_seq: cur_seq + available_size]
res = ''
if len(current_data) > MSS:
res = current_data[:MSS]
current_data = current_data[MSS:]
else:
res = current_data + ''
current_data = ''
print 'length: ' + str(len(res))
return res
def check_for_retransmit(source_ip, dest_ip):
cur_t = time.time()
for seq in SENT_PKT.keys():
if SENT_PKT.get(seq)[0] - cur_t > 60:
op = SENT_PKT.get(seq)
packet = construct_packet(source_ip, dest_ip, op[1], op[2], op[3], 'send')
try:
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
s.settimeout(180)
except socket.error, msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
s.bind(('eth0', 0))
s.send(packet)
def send_to_dest(url, source_ip, dest_ip):
global CSEQ_NUM
global SSEQ_NUM
global CWD
if hand_shake(source_ip, dest_ip):
print 'finished hand_shake'
try:
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
s.settimeout(180)
except socket.error, msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
# final full packet - syn packets dont have any data
get_http_header(url)
data = get_user_data()
packet = construct_packet(source_ip, dest_ip, data, CSEQ_NUM, SSEQ_NUM, 'send')
# s.sendto(packet, (dest_ip, 0))
s.bind(('eth0', 0))
s.send(packet)
response = recive_packets(source_ip, dest_ip)
while True:
print '##############################'
# print 'checksum: ' + str(checksum_packet(response))
if NEED_PRINT:
print 'Right addr pair: '
if response.get('ack') == 1:
temp = response.get('ack_num')
CACK_NUM = temp
if SENT_PKT.has_key(temp):
del SENT_PKT[temp]
check_for_retransmit(source_ip, dest_ip)
if response.get('ack') == 1 and response.get('psh') == 1:
CWD += 1
if len(response.get('data')) > 0:
if NEED_PRINT:
print 'pushing to buffer and ack: '
if response.get('seq_num') not in DATA_RCVD.keys():
DATA_RCVD[response.get('seq_num')] = response.get('data')
update_ack()
if NEED_PRINT:
print 'preparing ack: '
data = get_user_data()
if len(data) > 0:
print 'with data: !!!!!!!!!!!! = ' + data
if response.get('fin') != 1:
packet = construct_packet(source_ip, dest_ip, data, CSEQ_NUM, SSEQ_NUM, 'ack')
else:
packet = construct_packet(source_ip, dest_ip, data, CSEQ_NUM, SSEQ_NUM, 'ack', withfin = 1)
try:
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
s.settimeout(180)
except socket.error, msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
s.bind(('eth0', 0))
s.send(packet)
elif response.get('ack') == 1 and response.get('fin') != 1:
print 'just ack'
SSEQ_NUM = response.get('seq_num')
CSEQ_NUM = response.get('ack_num')
elif len(USER_DATA) > CSEQ_NUM:
send_user_data(source_ip, dest_ip, response.get('window_size'))
elif response.get('fin') == 1:
start_tear_down(source_ip, dest_ip, response.get('seq_num'))
break
response = recive_packets(source_ip, dest_ip)
print 'rcvd wrong packet'
def update_ack():
global CSEQ_NUM
global SSEQ_NUM
temp = SSEQ_NUM
print ' '.join(map(str, DATA_RCVD.keys()))
while True:
if SSEQ_NUM in DATA_RCVD.keys():
SSEQ_NUM += len(DATA_RCVD.get(SSEQ_NUM))
else:
break
print 'now ack: ' + str(SSEQ_NUM),
print 'skip from ' + str(temp) + 'to ' + str(SSEQ_NUM)
def send_user_data(source_ip, dest_ip, receiver_adv_win):
global CSEQ_NUM
global SSEQ_NUM
if len(USER_DATA) <= CSEQ_NUM:
return
while True:
data = get_user_data()
if len(data) > 0:
packet = construct_packet(source_ip, dest_ip, data, CSEQ_NUM, SSEQ_NUM, 'send')
try:
# s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
s.settimeout(180)
except socket.error, msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
# s.sendto(packet, (dest_ip, 0))
s.bind(('eth0', 0))
s.send(packet)
num_sent += len(data)
else:
# no more user data left to send
break
def start_tear_down(source_ip, dest_ip, seq):
print 'tearing down'
packet = construct_packet(source_ip, dest_ip,'', CSEQ_NUM, seq+1, 'fin')
try:
# s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
s.settimeout(180)
except socket.error, msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
# s.sendto(packet, (dest_ip, 0))
s.bind(('eth0', 0))
s.send(packet)
# This function can be improved a lot
def recive_packets(source_ip, dest_ip):
global LAST_RCVD_TIME
while True:
try:
recv_sockraw = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))
recv_sockraw.settimeout(180)
except socket.error , msg:
print 'recv error'
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
try:
received_packet = recv_sockraw.recvfrom(65536)
print 'rcvd: ' + str(time.time())
LAST_RCVD_TIME = time.time()
except socket.timeout:
print 'time out'
sys.exit(0)
#packet string from tuple
received_packet = received_packet[0]
# print 'recv length: ' + str(len(received_packet))
received_packet = received_packet[14:]
ip_header = received_packet[0:20]
iph = unpack('!BBHHHBBH4s4s' , ip_header)
if ip_header_checksum(ip_header) != 0:
print ip_header_checksum(ip_header)
continue
mapIpTmp = decodeIpHeader(ip_header)
if (mapIpTmp.get('protocol') != socket.IPPROTO_TCP
or not mapIpTmp.has_key('srcaddr') or not mapIpTmp.get('srcaddr') == dest_ip
or not mapIpTmp.has_key('dstaddr') or not mapIpTmp.get('dstaddr') == source_ip):
continue
tcp_header = received_packet[20:40]
if len(iph) > 0:
mapIpTmp = decode_tcp_header(tcp_header, mapIpTmp)
mapIpTmp['data'] = received_packet[40:]
ALL_PACKS.append(mapIpTmp.get('seq_num'))
# for k,v in mapIpTmp.items():
# print k,"\t:\t",v
# print '******************************************'
# print str(mapIpTmp.get('dst_port')) + str(mapIpTmp.get('dst_port') == src_port)
# print str(mapIpTmp.get('srcaddr')) + str(mapIpTmp.get('src_port') == dest_ip) + dest_ip
# print str(mapIpTmp.get('dstaddr')) + str(mapIpTmp.get('dstaddr') == source_ip)
print mapIpTmp.get('dstaddr'),
print mapIpTmp.get('srcaddr'),
print mapIpTmp.get('dst_port'),
print mapIpTmp.get('src_port')
if (mapIpTmp.has_key('dst_port') and mapIpTmp.get('dst_port') == SRC_PORT
and mapIpTmp.has_key('srcaddr') and mapIpTmp.get('srcaddr') == dest_ip
and mapIpTmp.has_key('dstaddr') and mapIpTmp.get('dstaddr') == source_ip):
return mapIpTmp
def get_http_header(url):
global USER_DATA
url = urlparse.urlparse(url)
path = url.path
if path == "":
path = "/"
header = 'GET %s HTTP/1.1\r\n' % (path)
header += 'Host: %s\r\n' % (url.hostname)
header += 'Connection: keep-alive\r\n'
header += 'Cache-Control: max-age=0\r\n'
header += 'Accept: text/html,application/xhtmlxml,application/xml;q=0.9,image/webp,*/*;q=0.8\r\n'
header += 'Accept-Language: zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-TW;q=0.2\r\n'
header += "\r\n"
print 'header length: ' + str(len(header))
print header
USER_DATA += header
def get_dst_mac_addr(target, sourceipaddress):
global DST_MAC
# create packet
interface = 'eth0'
eth_hdr = pack("!6s6s2s", '\xff\xff\xff\xff\xff\xff', SRC_MAC.replace(':','').decode('hex'), '\x08\x06')
arp_hdr = pack("!2s2s1s1s2s", '\x00\x01', '\x08\x00', '\x06', '\x04', '\x00\x01')
arp_sender = pack("!6s4s", SRC_MAC.replace(':','').decode('hex'), socket.inet_aton(sourceipaddress))
arp_target = pack("!6s4s", '\x00\x00\x00\x00\x00\x00', socket.inet_aton(target))
while len(DST_MAC) == 0:
try:
# send packet
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x0806))
s.bind((interface, socket.htons(0x0806)))
s.send(eth_hdr + arp_hdr + arp_sender + arp_target)
# wait for response
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x0806))
s.settimeout(0.5)
response = s.recvfrom(2048)
responseMACraw = binascii.hexlify(response[0][6:12])
responseMAC = ":".join(responseMACraw[x:x+2] for x in xrange(0, len(responseMACraw), 2))
responseIP = socket.inet_ntoa(response[0][28:32])
if target == responseIP:
DST_MAC = responseMAC
print "Response from the mac %s on IP %s" % (responseMAC, responseIP)
except socket.timeout:
print 'timeout'
time.sleep(1)
def main():
global GATEWAYIP
global SRC_MAC
global DST_MAC
GATEWAYIP = get_default_gateway_linux()
SRC_MAC = get_local_mac_addr()
# now start constructing the packet
url = ''
if len(sys.argv) > 1:
url = sys.argv[1]
else:
url = 'http://david.choffnes.com/classes/cs4700sp15/2MB.log'
packet = ''
source_ip = get_local_ip_addr()
if source_ip == '':
print 'can not get ip_addr'
exit(1)
print 'source_ip ' + source_ip
# dest_ip = '192.168.1.1' # or socket.gethostbyname('www.google.com')
# url = 'http://david.choffnes.com'
# url = 'http://stackoverflow.com/questions/13405397/java-socket-client-sending-extra-bytes-to-device'
purl = urlparse.urlparse(url)
dest_ip = socket.gethostbyname(purl.hostname)
print 'dest_ip ' + dest_ip
get_dst_mac_addr(GATEWAYIP, source_ip)
print GATEWAYIP
print DST_MAC
print SRC_MAC
send_to_dest(url, source_ip, dest_ip)
data = ''
for k in sorted(DATA_RCVD.keys()):
data += DATA_RCVD.get(k)
cnt = data.find('\r\n\r\n')
data = data[cnt+4:]
# print data
filename = 'index.html'
if '/' in purl.path:
path = purl.path.split('/')
if path[-1] != '':
filename = path[-1]
if '.' in filename and filename.split('.')[1] in ['html', 'htm']:
pos = 0
chunked = ''
now = 0
while pos < len(data):
pos = data.find('\r\n', now)
try:
chunked += data[pos+2 : pos + 2 + int(data[now:pos], 16)]
now = pos + int(data[now:pos], 16) + 4
except ValueError:
break
f = open(filename, 'w')
f.write(chunked)
f.close()
else:
f = open(filename, 'wb')
f.write(data)
f.close()
for s in ALL_PACKS:
print s
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
c4d693a018899753b9d47f6da7643ece8efb4bfe
|
10fbe5526e5f0b8588b65f70f088cd86b6e9afbe
|
/irmtbds/migrations/0002_auto_20150218_1621.py
|
3c05b27f5b6c037590a673b577c9744a196e934f
|
[] |
no_license
|
MarkusH/django-migrations-benchmark
|
eb4b2312bb30a5a5d2abf25e95eca8f714162056
|
e2bd24755389668b34b87d254ec8ac63725dc56e
|
refs/heads/master
| 2016-09-05T15:36:45.250134 | 2015-03-31T23:44:28 | 2015-03-31T23:44:28 | 31,168,231 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 502 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('irmtbds', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='rqzheruyb',
name='xknvpfy',
),
migrations.AddField(
model_name='rqzheruyb',
name='kplrvqptcm',
field=models.IntegerField(default=0),
),
]
|
[
"[email protected]"
] | |
eda512e915db0b90dc647c7c5544c47aa6875d2b
|
99bb3ef44a1c0727017aab5a9161c0ffbb333c86
|
/hdl_comments.py
|
2588175449fc6768f253dd68f0156ce19f170cdb
|
[
"MIT"
] |
permissive
|
rdustinb/GAPy
|
7c35c73c89e85ad40fce99d18e2ba129b2439683
|
3caf19daee229636ed6fc2feac202cbdad8557cb
|
refs/heads/master
| 2022-08-09T18:24:46.595695 | 2022-07-26T16:51:04 | 2022-07-26T16:51:04 | 44,443,126 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,695 |
py
|
import sys, getopt, os
from termcolor import colored, cprint
def main(argv):
"""
Main Script Execution Point
"""
directory = ''
singlefile = ''
ignorestart = 'NaN'
try:
opts,args = getopt.getopt(argv, "hf:d:i:")
except getopt.GetoptError:
print('python hdl_comments.py -f <filename>')
print('or')
print('python hdl_comments.py -d <directory name>')
sys.exit(2)
# Parse through the options
for opt,arg in opts:
if opt == '-h':
print('python hdl_comments.py -f <filename>')
print('or')
print('python hdl_comments.py -d <directory name>')
sys.exit()
elif opt in ('-f'):
singlefile = arg
elif opt in ('-d'):
directory = arg
elif opt in ('-i'):
ignorestart = arg
# If Directory has been specified, only parse the directory
if(directory != ''):
for path, subdirs, files in os.walk(directory):
if ".svn" in path:
next
else:
for filename in files:
f = os.path.join(path, filename)
parse_single_file(f,ignorestart)
# If Only the file has been specified, parse the file
elif(singlefile != ''):
parse_single_file(singlefile,ignorestart)
def parse_single_file(file_name_path,ignore_start):
if(file_name_path[-3:] == "vhd"):
(total,single) = count_vhdl_file_comments(file_name_path,ignore_start)
percentage = (single/total)*100
if(percentage < 15):
percentage = colored("%.1f"%(percentage), 'red')
elif(percentage < 20):
percentage = colored("%.1f"%(percentage), 'yellow')
else:
percentage = colored("%.1f"%(percentage), 'green')
print("%s"%(file_name_path))
print("\tTotal Lines Parsed:\t\t%s"%(total))
print("\tSingle Line Comments:\t\t%s"%(single))
print("\tComment-Total Percentage:\t%s"%(percentage))
elif(file_name_path[-1:] == 'v' or file_name_path[-2:] == 'sv' or file_name_path[-2:] == 'vh'):
(total,block,single) = count_verilog_file_comments(file_name_path,ignore_start)
percentage = ((single+block)/total)*100
if(percentage < 15):
percentage = colored("%.1f"%(percentage), 'red')
elif(percentage < 20):
percentage = colored("%.1f"%(percentage), 'yellow')
else:
percentage = colored("%.1f"%(percentage), 'green')
print("%s"%(file_name_path))
print("\tTotal Lines Parsed:\t\t%s"%(total))
print("\tBlock Comment Lines:\t\t%s"%(block))
print("\tSingle Comment Lines:\t\t%s"%(single))
print("\tComment-Total Percentage:\t%s"%(percentage))
def count_verilog_file_comments(file_name_path,ignore_start):
"""
This function scans a single SystemVerilog/Verilog file, or another file that
is compatible with that language.
"""
total_file_lines = 1
block_comment_line_count = 0
single_comment_line_count = 0
ignoring = 1
if(ignore_start == "NaN"):
ignoring = 0
with open(file_name_path, 'r') as fp:
counting_block_comment = 0
for line in fp:
if(ignoring == 1):
if(ignore_start in line):
ignoring = 0
else:
total_file_lines += 1
if(counting_block_comment == 1):
block_comment_line_count += 1
if "*/" in line and "/*" in line:
# Weird case where a block comment may end, and another begin on the same line.
counting_block_comment = 1
elif "*/" in line:
counting_block_comment = 0
elif "/*" in line and "*/" in line:
# Weird case where a block comment may be started and ended in the same line
counting_block_comment = 0
block_comment_line_count += 1
elif "/*" in line:
counting_block_comment = 1
block_comment_line_count += 1
elif "//" in line:
single_comment_line_count += 1
# Return the Tuple: (total, block, single) for external processing/display
return (total_file_lines,block_comment_line_count,single_comment_line_count)
def count_vhdl_file_comments(file_name_path,ignore_start):
"""
This function scans a single VHDL file for instances of a VHDL comment. Since
VHDL doesn't have block comments available, only the single line comment can
be scanned for.
"""
total_file_lines = 1
single_comment_line_count = 0
ignoring = 1
if(ignore_start == "NaN"):
ignoring = 0
with open(file_name_path, 'r') as fp:
for line in fp:
if(ignoring == 1):
if(ignore_start in line):
ignoring = 0
else:
total_file_lines += 1
if "--" in line:
single_comment_line_count += 1
# Return the Tuple: (total, single) for external processing/display
return (total_file_lines,single_comment_line_count)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"[email protected]"
] | |
c36c3e33fb0f68a51c3042802b366cdc95ddec55
|
a22661610155c1c144f082b6ba4c7d935eeddbd6
|
/tests/test_user_model.py
|
675e627af78865e3c33ce1130e2919e69658d214
|
[] |
no_license
|
gerocha/petshop
|
7d21b746f83549671033ab772eaa652861791063
|
7e0b64e002c05ed4a9b17b59196fa55ed040df82
|
refs/heads/master
| 2020-03-28T09:48:51.764661 | 2018-09-23T01:19:14 | 2018-09-23T01:19:14 | 148,061,734 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 413 |
py
|
from petshop.user_model import authenticate, get_user
def test_authenticate_existing_user_should_return_user(user_batima):
auth = authenticate(username=user_batima['username'],
password=user_batima['password'])
assert auth is not None
def test_is_correct_password_with_correct_password(user_batima):
user = get_user('batima')
assert user.is_correct_password('123456')
|
[
"gel@[email protected]"
] | |
effb3ee7b4162231d120df597dc22d67af06d86a
|
36aa9e4268394b23826abf20c64bcb9821c102c3
|
/FTP_downloadBinFile.py
|
47266e801cea51b5fa01fec2e11bd889a868b22e
|
[
"Apache-2.0"
] |
permissive
|
ChenSunMac/BlueNose_ToolKit
|
011b3ad0fd1c9eb5eb206f26f66a798518ebb3df
|
edd4fbc0e13903c591f156d673d584551403905c
|
refs/heads/master
| 2021-09-15T04:10:55.561165 | 2018-05-25T14:19:51 | 2018-05-25T14:19:51 | 108,892,945 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,555 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 2 11:08:02 2017
@author: Chens
"""
import ftplib
import os
import socket
# HOST is the address of the remote FTP server
HOST = '222.222.444.92'
DIRN = 'hcjy/css/'
def main():
try:
f = ftplib.FTP(HOST)
except ftplib.error_perm:
print('Unable to connect to "%s"' % HOST)
return
print('Connected to "%s"' % HOST)
try:
# user is the FTP username, pwd is the password
f.login('user','pwd')
except ftplib.error_perm:
print('Login failed')
f.quit()
return
print('Login successful')
try:
# change into the working directory DIRN
f.cwd(DIRN)
except ftplib.error_perm:
print('Failed to list the current directory')
f.quit()
return
print(f.nlst())
# f.nlst() returns a listing of the current directory, assigned to downloadlist
downloadlist = f.nlst()
try:
os.getcwd()
# create a folder with the same name, css
os.mkdir('css')
# switch into the css folder (change the current working directory) so the files are downloaded there
os.chdir('css')
# iterate over the list of file names returned above
for FILE in downloadlist:
f.retrbinary('RETR %s' % FILE,open(FILE,'wb').write)
print('File "%s" downloaded successfully' % FILE)
except ftplib.error_perm:
print('Unable to read "%s"' % FILE)
os.unlink(FILE)
else:
print('All files downloaded!')
f.quit()
return
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
08e93077ca5d190a72f22f0dde55519979a6b6a6
|
3e0341c10981b49d15c3fb458f63de59357a821b
|
/venv/bin/wheel
|
51ea28c4f7ad7e6fa3b4354fa985621d1b4b0ebb
|
[] |
no_license
|
sagarsmn331/meon-task3
|
555f0562c036693b8e728946e01e213d1e3bdf8a
|
18c2e8389ee8d2bbbe18e506ccbb6d003c4ed2cf
|
refs/heads/master
| 2022-12-02T11:54:03.125091 | 2020-08-18T11:28:35 | 2020-08-18T11:28:35 | 288,436,935 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 229 |
#!/home/sagar/meon5/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
07d46fef45ca10f86c44bdf72420c66f478bbe99
|
032edbd5eccee1896a207f6e0b0ba1d026d4e984
|
/basics/threads.py
|
86d4e0ae8c859f7bfd652c9ccc8e1ee29e98a117
|
[
"MIT"
] |
permissive
|
grzesk075/PythonSandbox
|
95812a69e1b53e1faea574c10ec8db6fc79a58d2
|
9fa879b4a15a971258c458bbfe55c7a899c95ce5
|
refs/heads/master
| 2021-06-10T11:35:06.781026 | 2019-11-20T14:10:18 | 2019-11-20T14:10:18 | 156,559,088 | 0 | 0 |
MIT
| 2021-04-20T17:46:55 | 2018-11-07T14:31:11 |
Python
|
UTF-8
|
Python
| false | false | 547 |
py
|
import threading
import time
# import queue - designed for inter-thread communication and synchronization
class AsyncPrinter(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
for i in range(20):
time.sleep(0.01)
print('Async:', i)
AsyncPrinter().start()
for i in range(20):
time.sleep(0.01)
print('Main thread:', i)
# the weakref module makes it possible to create weak references, e.g. for caches or object tracking;
# they do not prevent the referenced objects from being garbage collected
|
[
"[email protected]"
] | |
8e0fdec3518e0ed5c1d564e69641dbdf3e33a918
|
9b617d281d83880d385a57809c4cafd55024d516
|
/manage.py
|
ca0d6f3d331fffa4ace90b822a09041b6d37c7af
|
[] |
no_license
|
crowdbotics-users/wwickey-crowdbotics-164
|
3df5074f39dc34de2def1bde928f523391942689
|
909b185e528f60b9258b317f7c26b35e791d8685
|
refs/heads/master
| 2020-03-16T12:02:27.614606 | 2018-05-08T20:01:05 | 2018-05-08T20:01:05 | 132,658,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wwickey_crowdbotics_164.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
5d8f1425b2fad2472d4c17f7052ec4d9db036f97
|
0e0cfb81069fda8cf67561b91cd4968f8e5c3d0d
|
/Actividad Teoria - Entrega 3 Mayo/src/windows/menu.py
|
71a6aad03a52f961be8c5255322f42d246454901
|
[] |
no_license
|
juliermili/seminario
|
919a555567c0e44e64ea5eb0591677425d26abb6
|
2fc2be8e8d1a7d060c20a6b07e8df03aacb5e62c
|
refs/heads/master
| 2023-04-23T20:03:54.988819 | 2021-05-03T03:39:20 | 2021-05-03T03:39:20 | 352,199,725 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 296 |
py
|
import PySimpleGUI as sg
def build():
layout = [
[sg.Button('Data1', size=(50, 2), key="-DATA1-")],
[sg.Button('Data2', size=(50, 2), key="-DATA2-")],
[sg.Button('Salir', size=(50, 2), key="-EXIT-")]
]
menu = sg.Window('menu').Layout(layout)
return menu
|
[
"[email protected]"
] | |
e86b129bd4a1c5aa43609bb6c91a77d19a9d689e
|
eb861e71ac828c01fa672acb8f69139d76f78981
|
/examples/nodeproppred/arxiv/ReLU.py
|
b6a73c6e4417bbf8c31bdce00c32bbddaa2aa930
|
[
"MIT"
] |
permissive
|
yifeiacc/ogbExperiment
|
d1ea20104886be5aa5a85ad97e6810597ea481a3
|
3b1dffd80c27b7f85101afa515461f327b87a6b2
|
refs/heads/master
| 2023-02-07T04:25:44.783600 | 2020-12-29T08:46:41 | 2020-12-29T08:46:41 | 325,198,626 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,215 |
py
|
import torch.nn as nn
from torch_geometric.nn.conv import MessagePassing
from torch.nn import Parameter
from torch_geometric.nn.inits import glorot
import torch
from torch_geometric.utils import remove_self_loops, add_self_loops, softmax
from torch import Tensor
from torch_sparse import SparseTensor, set_diag
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.utils import degree
class EdgeReluV2(MessagePassing):
def __init__(self, channels,
k=2, reduction=2,
add_self_loops=True,
negative_slope=0.2,
**kwargs):
kwargs.setdefault('aggr', 'mean')
super(EdgeReluV2, self).__init__(node_dim=0, **kwargs)
self.negative_slope = negative_slope
self.channels = channels
self.k = k
self.add_self_loops = add_self_loops
self.fc1 = GCNConv(channels, channels // reduction)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(channels // reduction, 2*k*channels)
self.sigmoid = nn.Sigmoid()
self.register_buffer('lambdas', torch.Tensor([1.]*k + [0.5]*k).float())
self.register_buffer('init_v', torch.Tensor(
[1.] + [0.]*(2*k - 1)).float())
self.att_l = Parameter(torch.Tensor(1, channels))
self.att_r = Parameter(torch.Tensor(1, channels))
self.reset_parameters()
def get_relu_coefs(self, x, edge_index):
theta = x
theta = self.fc1(theta, edge_index)
theta = theta.mean(dim=0)
theta = self.relu(theta)
theta = self.fc2(theta)
# theta = 2 * self.sigmoid(theta) - 1
theta = F.tanh(theta)
return theta
def reset_parameters(self):
glorot(self.att_l)
glorot(self.att_r)
def forward(self, x, edge_index, size=None):
x_l = x_r = x
alpha_l = (x_l * self.att_l).sum(dim=-1)
alpha_r = (x_r * self.att_r).sum(dim=-1)
# print(alpha_l.shape)
# print(alpha_r.shape)
if self.add_self_loops:
if isinstance(edge_index, Tensor):
num_nodes = x_l.size(0)
if x_r is not None:
num_nodes = min(num_nodes, x_r.size(0))
if size is not None:
num_nodes = min(size[0], size[1])
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
elif isinstance(edge_index, SparseTensor):
edge_index = set_diag(edge_index)
if isinstance(edge_index, SparseTensor):
self.degree = edge_index.sum(dim=0)
else:
_, col = edge_index[0], edge_index[1]
self.degree = degree(col)
theta = self.get_relu_coefs(x, edge_index)
self.theta = theta.view(-1, self.channels, 2 * self.k)
out = self.propagate(edge_index, x=(x_l, x_r),
alpha=(alpha_l, alpha_r), size=size)
return out
def message(self, x_j, alpha_j, alpha_i, index, ptr, size_i):
alpha = alpha_j if alpha_i is None else alpha_j + alpha_i
alpha = F.leaky_relu(alpha, self.negative_slope)
gamma = self.degree[index]/3
# self.degree = None
alpha = alpha / 10
alpha = softmax(alpha, index, ptr, size_i) * gamma
alpha = torch.min(alpha, torch.ones_like(alpha))
alpha = alpha.view(-1, 1, 1)
# relu_coefs = (alpha * self.theta) * self.lambdas + self.init_v
relu_coefs = (self.theta * self.lambdas + self.init_v) * alpha
# relu_coefs = F.dropout(relu_coefs, 0.2, training=self.training)
x = x_j
x = x.unsqueeze(-1)
x_perm = x.permute(2, 0, 1).unsqueeze(-1)
output = x_perm * relu_coefs[:, :, :self.k] + relu_coefs[:, :, self.k:]
result = torch.max(output, dim=-1)[0].permute(1, 2, 0).squeeze()
return result
class DyReLU(nn.Module):
def __init__(self, channels, reduction=4, k=2):
super(DyReLU, self).__init__()
self.channels = channels
self.k = k
self.fc1 = GCNConv(channels, channels // reduction)
# self.fc1 = GraphConv(channels, channels // reduction)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(channels // reduction, 2*k)
self.sigmoid = nn.Sigmoid()
self.register_buffer('lambdas', torch.Tensor([1.]*k + [0.5]*k).float())
self.register_buffer('init_v', torch.Tensor(
[1.] + [0.]*(2*k - 1)).float())
def get_relu_coefs(self, x, edge_index):
theta = x
theta = self.fc1(theta, edge_index)
theta = theta.mean(dim=0)
theta = self.relu(theta)
theta = self.fc2(theta)
theta = 2 * self.sigmoid(theta) - 1
return theta
def forward(self, x, edge_index):
raise NotImplementedError
class DyReLUC(DyReLU):
def __init__(self, channels, reduction=4, k=2):
super(DyReLUC, self).__init__(channels, reduction, k)
self.fc2 = nn.Linear(channels // reduction, 2*k*channels)
# self.pos = GraphConv(channels, 1)
self.pos = GCNConv(channels, 1)
self.dropout = nn.Dropout(0.2)
def pos_coefs(self, x, edge_index):
x = self.pos(x, edge_index)
x = x.squeeze()
x = x / 10
x_norm = F.softmax(x).view(-1, 1)
x_norm = x_norm * (x.shape[0]/3)
return torch.min(x_norm, torch.ones_like(x_norm))
def forward(self, x, edge_index):
assert x.shape[1] == self.channels
theta = self.get_relu_coefs(x, edge_index)
relu_coefs = theta.view(-1, self.channels, 2 * self.k)
pos_norm_coefs = self.pos_coefs(x, edge_index).view(-1, 1, 1)
relu_coefs = relu_coefs * pos_norm_coefs * self.lambdas + self.init_v
# relu_coefs = F.dropout(relu_coefs, 0.2, training=self.training)
x = x.unsqueeze(-1)
x_perm = x.permute(2, 0, 1).unsqueeze(-1)
output = x_perm * relu_coefs[:, :, :self.k] + relu_coefs[:, :, self.k:]
result = torch.max(output, dim=-1)[0].permute(1, 2, 0).squeeze()
self.coefs = relu_coefs
return result
|
[
"[email protected]"
] | |
2a5762a03705f381381e6c124790e7ce1ab5d662
|
93a7db386dfa0ac0dc369cc7f4b974224c801d8d
|
/scripts/ngram_io.py
|
33d3856f68312a40f09259482de1803a86d567b5
|
[] |
no_license
|
lingxiao/good-great-combo
|
e051f20c89b7317a14ca5cee357bda7b095ce174
|
4d2691866bc21e2c542354ad3aae6f369eb86c87
|
refs/heads/master
| 2021-01-19T19:30:43.391759 | 2017-04-09T12:35:15 | 2017-04-09T12:35:15 | 83,699,772 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,764 |
py
|
############################################################
# Module : Open Ngram and read linguistic pattern
# Date : April 3rd, 2017
# Author : Xiao Ling, merle
############################################################
import os
############################################################
'''
@Use : Open all ngrams in ngram_dir and stream output as tuple of (ngram, count)
@Input : - ngram_dir :: String
- debug :: Bool, if true then only output parts of stream
@Output: Iterator output ngrams of form:
(ngram, count) :: Iterator (String, String)
Throw: NameError if path does not exists
'''
def with_ngram(ngram_dir, debug = False):
if not os.path.exists(ngram_dir):
raise NameError('Path not found at ' + ngram_dir)
else:
ngram_paths = [os.path.join(ngram_dir, p) for \
p in os.listdir(ngram_dir) if '.txt' in p]
if not ngram_paths:
raise NameError('Directory Empty at ' + ngram_dir)
if debug:
ngram_paths = [ngram_paths[0]]
for path in ngram_paths:
with open(path, 'rb') as h:
for line in h:
xsn = line.split('\t')
if len(xsn) == 2:
xs,n = xsn
n,_ = n.split('\n')
yield (xs,n)
############################################################
'''
@Use: Given path to linguistic pattern, output pattern
'''
def read_pattern(pattern_path):
if os.path.exists(pattern_path):
strong_weak, weak_strong = open(pattern_path,'rb').read().split('=== weak-strong')
strong_weak = [p for p in strong_weak.split('\n') if p][1:]
weak_strong = [p for p in weak_strong.split('\n') if p][:-1]
return {'strong-weak': strong_weak, 'weak-strong': weak_strong}
else:
raise NameError('Cannot find pattern at path ' + pattern_path)
|
[
"[email protected]"
] | |
d605544bb5bd4b5f2f891b75f75930b2d21e7fe4
|
048df2b4dc5ad153a36afad33831017800b9b9c7
|
/atcoder/agc008/agc008_c.py
|
01428e6976f334cebf389e5e84a0a5f947a48943
|
[] |
no_license
|
fluffyowl/past-submissions
|
a73e8f5157c647634668c200cd977f4428c6ac7d
|
24706da1f79e5595b2f9f2583c736135ea055eb7
|
refs/heads/master
| 2022-02-21T06:32:43.156817 | 2019-09-16T00:17:50 | 2019-09-16T00:17:50 | 71,639,325 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 276 |
py
|
a, b, c, d, e, f, g = map(int, raw_input().split())
if a > 0 and d > 0 and e > 0:
ans1 = b + a / 2 * 2 + d / 2 * 2 + e / 2 * 2
ans2 = b + 3 + (a-1) / 2 * 2 + (d-1) / 2 * 2 + (e-1) / 2 * 2
print max(ans1, ans2)
else:
print b + a / 2 * 2 + d / 2 * 2 + e / 2 * 2
|
[
"[email protected]"
] | |
6c9b764a14bf8bfa12a485be883a1637e1498062
|
8efe1f1ea1a9ac81b8abc261aae0a8084131b478
|
/utility/get_korea_stock_code_list.py
|
c7f6113b033b39f2293a5738c3d698a82af033f2
|
[] |
no_license
|
linanzhu/TradeBot
|
8de6befd715724ff5602b5dc71c89132b0cf0cca
|
a9b08fc48d2ad4b5e27c92c72968a88eed191acf
|
refs/heads/master
| 2020-03-18T17:47:48.062419 | 2018-05-27T14:30:13 | 2018-05-27T14:30:13 | 135,051,225 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,034 |
py
|
# -*- coding: utf-8 -*-
# The following site, run by a company specialising in stock trading algorithms,
# provides the ticker codes of listed stocks.
# http://bigdata-trader.com/itemcodehelp.jsp
# Packages that need to be installed
# : pip install lxml
# : pip install html5lib
# : pip install beautifulsoup4
import os
import numpy as np
import html5lib
import pandas as pd
if float(pd.__version__[0:3]) >= 0.2:
# Need to install "pip3 install pandas_datareader"
import pandas_datareader.data as pdr
else:
import pandas.io.data as pdr
code_df = pd.read_html('http://bigdata-trader.com/itemcodehelp.jsp', header=0)[0]
code_df = code_df.rename(columns={'종목코드': 'Code', '종목명': 'Name', '종류': 'Market'})
code_df = code_df[['Code', 'Name', 'Market']]
# Stock codes are 6 digits, so this would pad them with leading zeros to 6 digits
#code_df.Code = code_df.Code.map('{:06d}'.format)
savepath = os.getcwd() + '/korea_all_stock_code.csv'
code_df.to_csv(savepath, sep=',', index=False)
|
[
"[email protected]"
] | |
4a0bcf2cba4fcc37359b3360e26fdc69ed83fda6
|
e75f01e5db9239e637879c0dda03ce5254b14466
|
/fls/migrations/0016_auto_20190321_2317.py
|
086b104a3e567a4493465592b06fa92cb12ad32b
|
[] |
no_license
|
Korgutlova/diploma
|
b90b49fd6c2f2d2f36d6ae84b937821e9ae31dc3
|
cee5759a33330627d2b0927937138c128da4d368
|
refs/heads/master
| 2020-04-23T18:24:46.931706 | 2019-06-26T06:00:35 | 2019-06-26T06:00:35 | 171,366,047 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
# Generated by Django 2.1.7 on 2019-03-21 20:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('fls', '0015_auto_20190319_2338'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='custom_user', to=settings.AUTH_USER_MODEL),
),
]
|
[
"[email protected]"
] | |
15255dffd47f10b3f99409f7b5dea95315005ab9
|
fb8cbebdf034b2f478943752d5443afc82c6eef5
|
/tuirer/users/models.py
|
a3a6f2b88a946f2a8ca0ab80decd3e78a3924509
|
[] |
no_license
|
fariasjr/CitiTuirer
|
f64e0ec93ef088f8140bb0961d2ad4ed3b59448a
|
deb3f7a9c2d45b8a7f54639037f097b99abdac11
|
refs/heads/master
| 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 246 |
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
picture = models.ImageField('Foto de perfil', default='/img/blank-pic.png')
following = models.ManyToManyField('self', blank=True)
|
[
"[email protected]"
] | |
2c56642bd2995c1960d44d3ecaf052abec1da8d3
|
208fc53e55e88b94aec48b0f1a9c13d19793bc0e
|
/assignment_1/problem2.py
|
bb967b7d93d18ebcb82e3e992ff6b6acece2e4c7
|
[] |
no_license
|
YixueWang/Priniciples-of-Informatics
|
8810e6753b698c431ce8426593e00d9c312ec932
|
1d0c25e55bce934043080605e7349a8d58cb518e
|
refs/heads/master
| 2021-01-10T14:35:10.516388 | 2015-12-03T21:14:40 | 2015-12-03T21:14:40 | 47,356,115 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
py
|
import sys
import pandas as pd
file1 = open(sys.argv[1])
df = pd.DataFrame.from_csv(file1)
a = set(df['Complaint Type'])
b = list(df['Complaint Type'])
for n in a:
print str(n) +' with '+ str(b.count(n)) +' complaints'
|
[
"[email protected]"
] | |
605b69b97d71ca06ff53108fa17904b0d3e284f3
|
e9ceaa0bb091c189373ac0c70a545bca5791d50d
|
/egg_timer_2.py
|
feffa178e94448820d73a59289c40ae4f4105fe6
|
[] |
no_license
|
oupofat/lesson-one
|
5a967a14a68175ddde4b6f4e77d0a068e8ad8262
|
8fa591fc4be08ccd4eb0bb01a72eaa5795eb295a
|
refs/heads/master
| 2021-05-01T21:24:38.509955 | 2018-02-10T02:18:29 | 2018-02-10T02:18:29 | 120,976,057 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 325 |
py
|
'''It takes 2 eggs to make 5 pancakes. Ask the user how many pancakes they want to make, and tell them how many eggs they need. Decimals are okay.'''
pancakes = float(input("How many pancakes would you like? "))
eggs = 2/5
eggs_uses = eggs * pancakes
print ("You will need", eggs_uses, "eggs to make", pancakes, "pancakes!")
|
[
"[email protected]"
] | |
522f3091c7930ae3db3d74832d60772a6e40c0df
|
4eb4a51464a1c6f1729a92e7dabc4b6e747c23e5
|
/tests/python/test_copy.py
|
bdcd4f6325adcb2e1c4e2989e555478273b0293f
|
[
"BSD-3-Clause"
] |
permissive
|
LongyanU/psvWave
|
b8ae7cd8024f940affa0d259fe27bc8582393f65
|
2caf2d5018a7f80ecb645640c8564afb52883819
|
refs/heads/master
| 2022-11-09T01:31:30.150963 | 2020-07-03T09:54:59 | 2020-07-03T09:54:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 583 |
py
|
import psvWave
import numpy
def test_copy():
model = psvWave.fdModel(
"../../tests/test_configurations/default_testing_configuration.ini"
)
model2: psvWave.fdModel = model.copy()
def test_copy_modify():
model1 = psvWave.fdModel(
"../../tests/test_configurations/default_testing_configuration.ini"
)
model2: psvWave.fdModel = model1.copy()
model1.set_model_vector(model1.get_model_vector() + 1)
model2.set_model_vector(model2.get_model_vector() - 1)
assert numpy.any(model2.get_model_vector() != model1.get_model_vector())
|
[
"[email protected]"
] | |
3e98058f7493b2f337bc4cd0732597063e4222b7
|
c5231cd5696c4c036723e9c36110cf6da9b0fc8f
|
/ui/testDialog.py
|
59f10c203e43cb0764b56dec160c3c9ff7a77422
|
[] |
no_license
|
kubaz122/Cristal
|
26afc0bc6619ce3cc01b785f7f34b0d59cbf03d9
|
7b2adf00608b96e90fa2c7a2c915167b1d0efaec
|
refs/heads/master
| 2020-12-27T15:57:32.644733 | 2020-02-03T16:40:31 | 2020-02-03T16:40:31 | 237,890,176 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,888 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/zielak/BlackDev/BlackDevUnpack-zqcazfqpyi/usr/src/cristal/ui/testDialog.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(424, 295)
self.gridLayout = QtWidgets.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 0, 0, 1, 1)
self.label = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(30)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 0, 2, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 1, 1, 2)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "Hello world!"))
|
[
"[email protected]"
] | |
b313a40665834fa373eb35ee42a45551315f5bcc
|
60e27db34568fbb042ebc1209a80a69ca244de0c
|
/resources/lib/common/constants.py
|
9fea76c391c544a3ee0a8b543751cf7d39c9cd57
|
[] |
no_license
|
Prometheusx-git/plugin.video.unofficial9anime
|
425f3fce1e299c06e96b873567a42e4b4f2c9c25
|
9ea5c5bde3337fadc3bde4e915db0f8efcfc2e9c
|
refs/heads/master
| 2021-01-01T06:30:31.144584 | 2019-03-20T08:49:23 | 2019-03-20T08:49:23 | 97,443,808 | 7 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,607 |
py
|
# -*- coding: utf-8 -*-
'''
The Unofficial Plugin for 9anime, aka UP9anime - a plugin for Kodi
Copyright (C) 2016 dat1guy
This file is part of UP9anime.
UP9anime is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
UP9anime is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with UP9anime. If not, see <http://www.gnu.org/licenses/>.
'''
from collections import OrderedDict
plugin_name = 'plugin.video.unofficial9anime'
runplugin = 'XBMC.RunPlugin(%s)'
appdata_cache_path = 'appdata.db'
watch_list = '/user/watchlist'
# Most lists on 9anime can be sorted in a variety of different ways
sort_types = OrderedDict([
('No order', ''),
('Default order', '&sort=default'),
('Sort by most watched', '&sort=views'),
('Sort by recently updated', '&sort=episode_last_added_at'),
('Sort by recently added', '&sort=post_date'),
('Sort by release date', '&sort=release_date'),
('Sort alphabetically', '&sort=title'),
('Sort by scores', '&sort=scores')
])
main_menu = [
('Last show visited', {'value':'sql', 'action':'lastvisited'}),
('Browse', {'value':'submenu_browse', 'action':'localList'}),
('Watch list', {'value':'submenu_watchlist', 'action':'watchList'}),
('Search', {'value':'search', 'action':'search'}),
('Settings', {'value':'settings', 'action':'settings'})
]
# 9anime's watch list has multiple categories
submenu_watchlist = [
('All', {'value':'all', 'action':'watchList'}),
('Watching', {'value':'watching', 'action':'watchList'}),
('Completed', {'value':'watched', 'action':'watchList'}),
('On-hold', {'value':'onhold', 'action':'watchList'}),
('Dropped', {'value':'dropped', 'action':'watchList'}),
('Plan to watch', {'value':'planned', 'action':'watchList'}),
]
submenu_browse = [
('Most Watched', {'value':'/filter?sort=views', 'action':'mediaList'}),
('Trending', {'value':'/', 'action':'trendingList'}),
('Last updated', {'value':'/filter?sort=episode_last_added_at', 'action':'mediaList'}),
('Newest', {'value':'/filter?sort=release_date', 'action':'mediaList'}),
#('Upcoming', {'value':'/upcoming', 'action':'upcomingList'}),
('Year', {'value':'submenu_year', 'action':'localList'}),
('Genre', {'value':'submenu_genres', 'action':'localList'}),
('Movies', {'value':'/filter?type%5B%5D=movie', 'action':'sortList'}),
('OVAs, ONAs, and Specials', {'value':'/filter?type%5B%5D=ova&type%5B%5D=ona&type%5B%5D=special', 'action':'sortList'}),
('Currently airing', {'value':'/filter?status%5B%5D=airing', 'action':'sortList'}),
('Finished', {'value':'/filter?status%5B%5D=finished', 'action':'sortList'}),
('Dubbed', {'value':'/filter?language=dubbed', 'action':'sortList'}),
('Subbed', {'value':'/filter?language=subbed', 'action':'sortList'})
]
submenu_year = [
('2017', {'value':'/filter?release%5B%5D=2017', 'action':'sortList'}),
('2016', {'value':'/filter?release%5B%5D=2016', 'action':'sortList'}),
('2015', {'value':'/filter?release%5B%5D=2015', 'action':'sortList'}),
('2014', {'value':'/filter?release%5B%5D=2014', 'action':'sortList'}),
('2013', {'value':'/filter?release%5B%5D=2013', 'action':'sortList'}),
('2012', {'value':'/filter?release%5B%5D=2012', 'action':'sortList'}),
('2011', {'value':'/filter?release%5B%5D=2011', 'action':'sortList'}),
('2010', {'value':'/filter?release%5B%5D=2010', 'action':'sortList'}),
('2009', {'value':'/filter?release%5B%5D=2009', 'action':'sortList'}),
('2008', {'value':'/filter?release%5B%5D=2008', 'action':'sortList'}),
('2007', {'value':'/filter?release%5B%5D=2007', 'action':'sortList'}),
('Older', {'value':'/filter?release%5B%5D=Older', 'action':'sortList'})
]
submenu_genres = [
('Action', {'value':'/filter?genre%5B%5D=1', 'action':'sortList'}),
('Adventure', {'value':'/filter?genre%5B%5D=2', 'action':'sortList'}),
('Cars', {'value':'/filter?genre%5B%5D=3', 'action':'sortList'}),
('Comedy', {'value':'/filter?genre%5B%5D=4', 'action':'sortList'}),
('Dementia', {'value':'/filter?genre%5B%5D=5', 'action':'sortList'}),
('Demons', {'value':'/filter?genre%5B%5D=6', 'action':'sortList'}),
('Drama', {'value':'/filter?genre%5B%5D=7', 'action':'sortList'}),
('Ecchi', {'value':'/filter?genre%5B%5D=8', 'action':'sortList'}),
('Fantasy', {'value':'/filter?genre%5B%5D=9', 'action':'sortList'}),
('Game', {'value':'/filter?genre%5B%5D=10', 'action':'sortList'}),
('Harem', {'value':'/filter?genre%5B%5D=11', 'action':'sortList'}),
('Historical', {'value':'/filter?genre%5B%5D=12', 'action':'sortList'}),
('Horror', {'value':'/filter?genre%5B%5D=13', 'action':'sortList'}),
('Josei', {'value':'/filter?genre%5B%5D=14', 'action':'sortList'}),
('Kids', {'value':'/filter?genre%5B%5D=15', 'action':'sortList'}),
('Magic', {'value':'/filter?genre%5B%5D=16', 'action':'sortList'}),
('Martial Arts', {'value':'/filter?genre%5B%5D=17', 'action':'sortList'}),
('Mecha', {'value':'/filter?genre%5B%5D=18', 'action':'sortList'}),
('Military', {'value':'/filter?genre%5B%5D=19', 'action':'sortList'}),
('Music', {'value':'/filter?genre%5B%5D=20', 'action':'sortList'}),
('Mystery', {'value':'/filter?genre%5B%5D=21', 'action':'sortList'}),
('Parody', {'value':'/filter?genre%5B%5D=22', 'action':'sortList'}),
('Police', {'value':'/filter?genre%5B%5D=23', 'action':'sortList'}),
('Psychological', {'value':'/filter?genre%5B%5D=24', 'action':'sortList'}),
('Romance', {'value':'/filter?genre%5B%5D=25', 'action':'sortList'}),
('Samurai', {'value':'/filter?genre%5B%5D=26', 'action':'sortList'}),
('School', {'value':'/filter?genre%5B%5D=27', 'action':'sortList'}),
('Sci-Fi', {'value':'/filter?genre%5B%5D=28', 'action':'sortList'}),
('Seinen', {'value':'/filter?genre%5B%5D=29', 'action':'sortList'}),
('Shoujo', {'value':'/filter?genre%5B%5D=30', 'action':'sortList'}),
('Shoujo Ai', {'value':'/filter?genre%5B%5D=31', 'action':'sortList'}),
('Shounen', {'value':'/filter?genre%5B%5D=32', 'action':'sortList'}),
('Shounen Ai', {'value':'/filter?genre%5B%5D=33', 'action':'sortList'}),
('Slice of Life', {'value':'/filter?genre%5B%5D=34', 'action':'sortList'}),
('Space', {'value':'/filter?genre%5B%5D=35', 'action':'sortList'}),
('Sports', {'value':'/filter?genre%5B%5D=36', 'action':'sortList'}),
('Super Power', {'value':'/filter?genre%5B%5D=37', 'action':'sortList'}),
('Supernatural', {'value':'/filter?genre%5B%5D=38', 'action':'sortList'}),
('Thriller', {'value':'/filter?genre%5B%5D=39', 'action':'sortList'}),
('Vampire', {'value':'/filter?genre%5B%5D=40', 'action':'sortList'}),
('Yaoi', {'value':'/filter?genre%5B%5D=41', 'action':'sortList'}),
('Yuri', {'value':'/filter?genre%5B%5D=42', 'action':'sortList'})
]
ui_table = {
'submenu_watchlist': submenu_watchlist,
'submenu_browse': submenu_browse,
'submenu_year': submenu_year,
'submenu_genres': submenu_genres
}
|
[
"[email protected]"
] | |
bfc0eb29f275677c7395aadbfec57e0cf384125f
|
416753425946c580d452bfe0eb86563230e7d01b
|
/app_blog/tests_model.py
|
cc1c66310038afb71bc8f2a6b5b7e33fc9072dd9
|
[] |
no_license
|
UADesant/myblog
|
79404a0be206b1b57b71f37478b2ef1b1d27c362
|
68256f2847a6e9e3fcb2e3398d050cfd1c4b1bb4
|
refs/heads/master
| 2023-04-17T10:20:47.365102 | 2021-05-07T09:28:31 | 2021-05-07T09:28:31 | 359,869,024 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 504 |
py
|
from django.test import TestCase
# Create your tests here.
from .models import Category
class CategoryModelTest(TestCase):
@classmethod
def setUpTestData(cls):
# Set up non-modified objects used by all test
Category.objects.create(category='Innovations', slug='innovations')
def test_get_absolute_url(self):
category = Category.objects.get(id=1)
self.assertEquals(category.get_absolute_url(),
'/articles/category/innovations',)
|
[
"[email protected]"
] | |
727df59b7e7d7e6f5d0fe4af8ed16d4cd63151dd
|
0459eca6819b9a57a7fc388ee626fbcece9e6c90
|
/projet_st.R
|
941c8b00513acd02e1cda0a3404be7fc97b5a664
|
[] |
no_license
|
Orlogskapten/Vectoriel_auto_regressif
|
c3138ee88b05f1765cfb43941061e675ad984356
|
09782d9d33d9dc387d22c16803d14ffa7f78145a
|
refs/heads/master
| 2022-12-01T23:11:59.502390 | 2020-08-13T07:31:51 | 2020-08-13T07:31:51 | 287,211,705 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 44,876 |
r
|
#!/usr/bin/env python
# coding: utf-8
# # Time series analysis project
#
# In collaboration with:
#
# - [Paul Leydier](https://github.com/pleydier)
#
# - [Damien Raimond](https://github.com/dams-lab/)
#
# - [Wenceslas Sanchez](https://github.com/Orlogskapten)
#
# ---
#
# The goals of the project are to:
#
# - develop (without any R packages) the tools needed to fit a VAR model generalised to order p
#
# - build the impulse response functions (responses to shocks) of a VAR model of order p
#
# - and apply all of this to the analysis of the dependencies between the Chinese, US and Eurozone economies.
#
# All the functions we developed are generalised to order p, and are commented so that their inputs and construction can be understood.
#
# ---
#
# ### Contents:
#
# [Question 1](#1)
#
# [Question 2](#2)
#
# [Question 3](#3)
#
# [Question 4](#4)
#
# [Annexes](#a)
# In[1]:
setwd("C:/Users/Wenceslas/Desktop/R/R_project/serie_temp/projet")
data= read.csv("Data.csv", head= TRUE, sep= ";", dec=",")
dates= as.Date(data[, 1], format= "%d.%m.%Y")
data[, 1] = NULL # drop the dates column
data[, 4] = NULL # drop the last column
# Check the column types
str(data)
# The columns come in as factors instead of doubles,
# so convert them from factor to double
for (i in 1:3){
data[, i] = as.double(levels(data[, i]))[data[, i]] / 100 # and divide by 100 at the same time
}
data_matrix= as.matrix(data)
head(data)
# In[2]:
fig <- function(width, heigth){
# Sets the size of a plot
# Equivalent to plt.figure(figsize= (width, heigth)) in Python
options(repr.plot.width= width, repr.plot.height= heigth)
}
fig(5, 8)
layout(matrix(1:3, 3, 1, byrow= T))
col_named= colnames(data) # get the country names
for (i in 1:3){
plot(dates, data[, i]
, col= "red", main= col_named[i]
, ylab= "", xlab= "Dates", type= "l")
grid(col= "grey")
}
# #### Remark:
#
# Above are the dynamics of our three growth-rate series. A first observation: the Eurozone and USA series have the same shape, but not the same amplitude. We therefore have two economies whose growth rates react in the same way; below, the correlation between these two series is close to 0.7.
#
# This could matter for the rest of the analysis, in particular for understanding how a shock spreads from one country to another.
# In[3]:
print("Correlation matrix")
cor(data_matrix)
# Another point: apart from the 2008-2012 period, the United States and the Eurozone seem to have fairly stable growth rates between 0 and 2% (4% for the United States). This is reminiscent of the secular-stagnation literature, even though according to Summers and Gordon growth rates in such a situation should be much lower (~0.5%).
#
# A last point about China: since the 2008 crisis and its recovery in 2011, its growth rate has been trending downwards. The 10% annual growth China enjoyed until then seems to be a bygone era, with rates that could converge towards those of developed countries (such as the United States) within a few years.
# China's exceptional growth rates were indeed due to the catch-up effect of its economy, and the decline in those rates is probably linked to the fact that China is finishing catching up with the most developed countries.
# <a id= "1"></a>
#
# ### 1). Explain what a VAR model and an impulse response function is.
# A VAR model analyses time series in a multivariate way, studying the linear dependencies across all the series under consideration. It is distinguished from an AR model by this multivariate aspect.
#
# To explain a variable $X_t$ with a VAR model of order 1, we use the data of the previous period, such that:
#
# $$VAR(1): X_t= \phi_0 + \phi_1X_{t-1} + \epsilon_t$$
# with:
# $$X_t= \begin{pmatrix}
# chine\_growth_t\\
# USA\_growth_t\\
# Zone€\_growth_t
# \end{pmatrix}$$
#
# At order p we have:
# $$VAR(p): X_t= \phi_0 + \phi_1X_{t-1} + \phi_2X_{t-2} + ... + \phi_pX_{t-p} + \epsilon_t$$
# In other words, a VAR of order p captures a link between our data at time t and the data observed up to t-p.
#
# What is remarkable about a VAR model is that it can be transformed so that the lagged series $X_{t-p}$ disappear and only the shocks remain. The idea behind this transformation is to explain the values of $X_t$ as a function of past shocks; this is precisely what an impulse response function describes, namely the reaction of each series, period after period, to a one-off shock hitting one of them (a small simulation sketch is given below). In economics this is a useful concept, for example to see how a demand shock propagates through growth-rate, interest-rate and unemployment series; macroeconomic variables are rarely independent of one another, so observing and understanding the impact of a shock coming from one series on the others is essential.
#
# In our project we have the growth rates of China, the United States and the Eurozone from 1996 to the end of 2019. With globalisation and ever closer connections between economies, it is interesting to see how a growth shock in one country can reach another and, above all, for how long. This is not unlike the current situation and the global COVID crisis hitting all our economies. We will come back to this at greater length in the last question.
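# A minimal toy sketch of these two ideas: simulate a bivariate VAR(1) with made-up
# coefficients and trace how a one-off unit shock on the first series propagates to both
# series over time; this is exactly the intuition behind an impulse response function.
# The coefficient values are hypothetical and only illustrate the mechanics.
phi_1_demo= matrix(c(0.5, 0.2,
                     0.1, 0.4), 2, 2, byrow= TRUE) # hypothetical VAR(1) coefficient matrix
horizon= 8
irf_demo= matrix(0, horizon + 1, 2)
irf_demo[1, ]= c(1, 0) # unit shock on the first series at t = 0
for (h in 1:horizon){
  # the response at horizon h is phi_1^h applied to the initial shock
  irf_demo[h + 1, ]= phi_1_demo %*% irf_demo[h, ]
}
round(irf_demo, 4) # each row: reaction of (series 1, series 2) h periods after the shock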
# To code a VAR of order p, we had to rethink how the dataset is built and how it interacts with the coefficients to be optimised.
#
# Suppose we have to build a **VAR of order 2**. The dataset we would use to build it would look like this:
#
#
# | t | China | USA | Eurozone | | t - 1 | China | USA | Eurozone | | t - 2 | China | USA | Eurozone |
# | --- | --- | --- | --- | | --- | --- | --- | --- | | --- | --- | --- | --- |
# | 1 | 0.109 | 0.026 | 0.012 | | | | | | | | | | |
# | 2 | 0.094 | 0.04 | 0.015 | | 1 | 0.109 | 0.026 | 0.012 | | | | |
# | 3 | 0.092 | 0.041 | 0.018 | | 2 | 0.094 | 0.04 | 0.015 | | 1 | 0.109 | 0.026 | 0.012 |
# | 4 | 0.103 | 0.044 | 0.019 | | 3 | 0.092 | 0.041 | 0.018 | | 2 | 0.094 | 0.04 | 0.015 |
# | 5 | | | | | 4 | 0.103 | 0.044 | 0.019 | | 3 | 0.092 | 0.041 | 0.018 |
# | 6 | | | | | 5 | | | | | 4 | 0.103 | 0.044 | 0.019 |
#
#
# with our first series $X_t$ to predict, followed by the 2 other (lagged) series needed to build a VAR of order 2. Notice that the first 2 rows of $X_t$ cannot be used to build this VAR, given the empty first 2 rows of the $X_{t-2}$ series. We therefore have to fit the model only on the observations at t = 3 and t = 4. With an order p, the model is fitted on m-p observations, where m is the number of observations in the dataset (see the small sketch below).
#
# We extract each lagged series separately and multiply it by the set of coefficients $\phi$ associated with it in order to compute our prediction $\tilde{X_t}$. We then compute the prediction error, after trimming $X_t$ to the right dimension. Since the goal is to minimise the error, we maximise the log-likelihood through a multivariate Gaussian density in order to find the right parameters $\phi$.
#
# A last point: the initial set of parameters $\phi$ is generated randomly; it is not a vector of zeros. This sometimes speeds up the convergence of the model, or even makes it converge at all.
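# A minimal toy sketch of the lagged-dataset construction described above, using a small
# made-up matrix instead of the real data: the target block keeps rows (p+1):m of X, and
# the lag-i regressor block keeps rows (p+1-i):(m-i).
X_toy= matrix(1:12, 4, 3) # 4 observations of 3 hypothetical series
p_toy= 2
m_toy= nrow(X_toy)
Y_block= X_toy[-c(1:p_toy), ] # the X_t block: only rows 3 and 4 are usable
lag_blocks= lapply(1:p_toy, function(i) X_toy[(p_toy + 1 - i):(m_toy - i), ]) # X_{t-1} and X_{t-2}
Y_block
lag_blocks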
# In[4]:
mvnorm<-function(X,mu,sigma)
{
# Computes the multivariate normal density
# X is a 1xn vector
# mu is a 1xn vector
# sigma is an nxn matrix
A=(2*pi)^(ncol(sigma)/2)
B=det(sigma)^(1/2)
C=-1/2*t(X-mu)%*%solve(sigma)%*%(X-mu)
D=exp(C)
return(1/(A*B)*D)
}
# test
mu= apply(data_matrix,2,mean)
sigma= diag(3)
mvnorm(data_matrix[2,],mu,sigma)
# In[5]:
VAR_loglik_p<-function(para, vectored)
{
# Permet de calculer la log vraisemblance d'un modèle VAR(p)
# para désigne un vecteur de (n + n*n*p) contenant les paramètres du modèle
# vectored correspond à un vecteur contenant le datatset, l'ordre du VAR, et les dimensions du datatset
# Récupère l'information du vecteur
stocked= tail(vectored, 3)
p= stocked[3]
n= stocked[1]
m= stocked[2]
X= matrix(vectored[1: (length(vectored) - 3)], m, n )
# Extraction des intercepts
phi_0= para[1:n]
# E désigne la valeur X calculée à l'aide du modèle
# On construit en amont E, ce qui nous permet d'ajouter les intercepts, et de bien définir
# sa dimension
E= matrix(phi_0, m-p, n, byrow= T)
# Si l'ordre du VAR = 3, alors il y aura 3 matrices de dimension nxn
# On récupère par itération les coefficients qui sont associés à chaque matrice (en premier
# la matrice associée aux données avec le premier retard, puis le deuxième etc.)
for (i in 1:p){
# Récupère les coefficients de la matrice du retard i
phi_i= matrix(para[((n*n*i + n) -(n*n) + 1):(n*n*i + n)], n ,n)
# Pour la matrice phi_1, les coefficients phi1_11, phi1_12, phi_13 dans le cas d'une var
# avec 3 séries, ne seront pas en ligne comme dans une représentation matricielle
# mais seront stockés dans la première colonne !!
# E= E[-1,] + X[-c((m-i+1):m),]%*%phi_i # enlève le bas
# E= E[-dim(E)[1],] + X[-c((m-i+1):m),]%*%phi_i # enlève le bas
# On fait le calcul phi_p . , mais comme les séries de X sont stockées en ligne
# et que les coefficients sont sotckés en colonne, on doit faire X . phi_p
# On enlève une partie de la matrice (le bas) qui dépend de l'ordre sur lequel on itère
# cf le markdown ?
phi_compute= X[-c((m-i+1):m),]%*%phi_i # enlève le bas de la matrice X pour associer les bons retards
if (i == p){
E= E + phi_compute
}
else {
E= E + phi_compute[-c(1:(p-i)),] # enlève le haut pour que les retards fit bien avec E et X
}
}
# Pour concorder avec le retard max (= ordre p), on doit se séparer des p premières lignes de X
residus= X[-c(1:p), ] - E
sigma= var(residus)
log_lik= 0
# Calcul de la log vraisemblance
# On commence la boucle à p+1 et non à 1 pour simplifier le raisonnement (permet de
# sélectionner les données X à partir de i)
# Mais on aurait pu commencer à 1 et on aurait modifier l'indice dans X et E
for (i in (1+p):m){
temp= mvnorm(X[i, ], E[(i-p),], sigma) # E est pris à partir de p car j'ai enlevé p lignes
# dans le processus précédent
temp= log(temp)
log_lik= log_lik - temp
}
return(log_lik)
}
# test
n= ncol(data_matrix)
p_order= 2 # order 2
VAR_loglik_p(numeric(n + n*n*p_order)
, c(data_matrix, n, nrow(data_matrix), p_order))
# In[6]:
# Optimisation test for a VAR(2)
n= ncol(data_matrix)
p_order= 2 # VAR of order 2
estimation_auto= function(X= data_matrix, p= p_order, num= n){
# Returns the results of optim (negative log-likelihood and coefficients)
# X is the dataset
# p is the order of the VAR to fit
# num is the number of series in the VAR
# We first try random weights between 0 and 1;
# if that causes a problem we fall back to a parameter set made only of zeros
# (in our case this did happen occasionally)
tryCatch({
weight= round(runif(num + num*num*p, 0, 1), 1)
para= weight/sum(weight) # permet de ne pas faire bugger l'optim
estimation= optim(para, fn= VAR_loglik_p
, vectored= c(X, ncol(X), nrow(X), p)
, method= "BFGS")
print("Initialization with random parameters")
return (estimation)
}, error= function(e) # au cas où
{
# Set de paramètres 0
para= numeric(num + num*num*p)
estimation= optim(para, fn= VAR_loglik_p
, vectored= c(X, ncol(X), nrow(X), p)
, method= "BFGS")
print("Initialization with zero values for parameters")
return (estimation)
})
}
# test
estimation_auto(X= data_matrix, p= p_order, num= n)
# <a id= "2"></a>
#
# ### 2). Using information criterions, estimate the lag to be used with the three data series for your VAR model. What do you think of this lag?
# Since p different VAR models can be generated, we need to determine which order is the most relevant for modelling our data. To that end, we use the previous functions to build every VAR model from order 1 to 10.
# To compare all these models we use information criteria (AIC, BIC and HQ), which take into account both the performance of a model (the value of the log-likelihood) and its complexity (the number of parameters). Indeed, it is very easy to get a good log-likelihood by increasing the number of parameters, but the model then becomes too specific to our dataset.
# In the case of a VAR, a very high order means many more parameters than a VAR of order 1: if n is the number of series, the number of parameters of a VAR of order p is $n*n*p + n$ (a quick worked example is given below).
#
# By minimising the information criteria, we find the model that strikes a good balance between performance and complexity.
#
# In our case, the values of these criteria are shown in the table below.
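# As a quick worked example, the number of parameters k = n + n*n*p for our n = 3 series
# at each candidate order p (these are the k values the penalty term of each criterion is
# applied to): 12 parameters at order 1, up to 93 at order 10.
n_series= 3
data.frame(p_order= 1:10, nb_parameters= n_series + n_series * n_series * (1:10))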
# In[7]:
# Determine which VAR order we should choose
cb_de_var= 10
formule_generale= function(p, log_lik, n, m, g){
# Computes any of the information criteria as a function of g
# p is the order of the VAR
# log_lik is the computed log-likelihood of the model
# n is the number of series (used to compute the number of parameters of the model)
# m is the number of observations
# g is the penalty function of the selected criterion
base= -(2*log(log_lik))
k= n + n*n*p # number of parameters
return (base + (k*g))
}
bic_g= function(m){
# Computes the g function for the BIC criterion
return (log(m))
}
hq_g= function(m){
# Computes the g function for the HQ criterion
return (log(bic_g(m)))
}
# Preparation
n= ncol(data_matrix)
order_var= c(1:cb_de_var)
aic= c()
bic= c()
hq= c()
# Iterate over all the selected orders p and compute the information criteria
for (i in order_var){
m_in= nrow(data_matrix) - i # at order i the usable sample shrinks by i observations
estimated= estimation_auto(X= data_matrix, p= i, num= n)
log_like_estimated_i= -1*estimated$value # the value returned is the negative log-likelihood,
# so we multiply by -1
aic= c(aic, formule_generale(i, log_like_estimated_i, n, m_in, g= 2))
bic= c(bic, formule_generale(i, log_like_estimated_i, n, m_in, g= bic_g(m_in)))
hq= c(hq, formule_generale(i, log_like_estimated_i, n, m_in, g= hq_g(m_in)))
}
# In[8]:
# Build the data frame showing each criterion as a function of the VAR order
df_which_order= data.frame(p_order= order_var
, AIC= aic
, BIC= bic
, HQ= hq)
df_which_order
# As a reminder, the best model is the one with the lowest criterion. In our case we are lucky: all criteria lead to the same conclusion, the VAR of order 1 is the best model.
# In other words, a lag of 1 is what best models $X_t$.
#
# We must admit we expected a lag of 2 or 4 given the nature of our series. We are modelling countries' growth rates, and it is plausible that first-quarter growth affects third-quarter growth.
# We also thought each series would be autocorrelated over several periods (6 months or even 1 year). And indeed, when we plot the autocorrelogram of each series separately, we observe strong autocorrelations, significant up to 5 periods, that is 1 year and 1 quarter.
#
# After some reading [_$^{1}$_](https://stats.stackexchange.com/questions/207156/autocorrelation-of-var-residuals), we realised that information criteria do not try to minimise autocorrelation; they pick the model that describes our data well, not one that describes it perfectly. In our case, removing the autocorrelation probably requires too large an increase in complexity, which is why the criteria point to the smallest order.
#
# Consequently, if our goal is prediction, a lag of 1 is ideal because it best balances performance and overfitting. But if our goal is explanation, and autocorrelation is a problem for the economic analysis, then another procedure should be used to select the VAR order.
#
# For the rest of the project we build a VAR of order 1 (i.e. we follow the information criteria).
#
# ---
# $^{1}$[Stats StackExchange](https://stats.stackexchange.com/questions/207156/autocorrelation-of-var-residuals)
# <a id= "3"></a>
#
# ### 3). Simulate impact
# Although we will simulate the impact of negative growth shocks with a VAR of order 1, we tried to generalise our functions to any VAR order.
#
# The problem is that the more lags we model, the more involved the construction of a generalised impulse response function becomes: one could imagine different shocks over several periods, for instance in the first and in the last quarter. In our case the shock only occurs at time t, so we do not need to go that far, but we still propose a solution to this problem.
#
# Our solution is to rewrite any VAR(p) as a VAR of order 1. The transformation works as follows:
# If $X_t$ denotes our n data series, $\phi_p$ the coefficient matrix attached to the series lagged p periods and $\epsilon_t$ the vector of prediction errors, written as:
#
# $$X_t= \begin{pmatrix}
# chine\_growth_t\\
# USA\_growth_t\\
# Zone€\_growth_t
# \end{pmatrix}$$
# et
# $$\phi_{p}=\begin{pmatrix}
# \phi_{1,1,p}&\phi_{1,2,p}&...&\phi_{1,n,p}\\
# \phi_{2,1,p}&\phi_{2,2,p}&...&\phi_{2,n,p}\\
# ...&...&...&...\\
# \phi_{n,1,p}&...&...&\phi_{n,n,p}
# \end{pmatrix}$$
# et
# $$\epsilon_{t}=\begin{pmatrix}
# \epsilon_{chine_t}\\
# \epsilon_{usa_t}\\
# \epsilon_{zone€_t}
# \end{pmatrix}$$
# $$$$
# alors notre représentation d'un VAR p en VAR 1 se fait de la manière suivante:
# $$$$
# $$\begin{pmatrix}
# X_t\\
# X_{t-1}\\
# ...\\
# ...\\
# X_{t-p+1}
# \end{pmatrix}
# =\begin{pmatrix}
# \phi_{1}&\phi_{2}&...&...&...&\phi_{p}\\
# 1\\
# 0&1&...\\
# ...\\
# 0&...&...&1&...&0
# \end{pmatrix}
# \begin{pmatrix}
# X_{t-1}\\
# X_{t-2}\\
# ...\\
# ...\\
# X_{t-p}
# \end{pmatrix}
# +\begin{pmatrix}
# \epsilon{t}\\
# 0\\
# ...\\
# ...\\
# 0
# \end{pmatrix}$$
# $$$$
# Since we know how to build the generalised response function from a matrix $\phi_1$ in the VAR(1) case, we can generalise its construction with the companion matrix above. With this construction we could simulate shocks over several periods at once, which is why we developed this way of writing a VAR(p). Nevertheless, we did not implement shocks over more than one period, so to build the response function we only use $\phi_1$, since the shock occurs in the first period.
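# To make the construction used in the code below explicit (a standard restatement, with the Cholesky ordering of the series taken as given): the covariance of the prediction errors, $\Sigma = Var(\epsilon_t)$, is factorised as $\Sigma = P P'$ with $P$ lower triangular, and the orthogonalised response to a shock vector $e$ at horizon $h$ is
#
# $$IRF(h) = \phi_{1}^{\,h}\, P\, e$$
#
# where $\phi_{1}^{\,h}$ is the $h$-th matrix power of $\phi_{1}$. This is the construction that `compute_choleski_p` and `irf_compute` below rely on.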
# In[9]:
# The goal is to build a matrix whose columns stack the series Xt, Xt-1, ..., Xt-p
# Computing the impact of a shock directly with a VAR(p) is complicated,
# so we transform our VAR(p) into a VAR(1)
transformation_Xt_varp= function(X, p_order){
    # Transforms the initial dataset so we can go from a VAR(p) to a VAR(1)
    # X is our dataset
    # p_order is the order of the VAR
n= ncol(X)
m= nrow(X)
    # For a VAR(1) the dataset is left unchanged
if (p_order == 1){
return (X)
}
else {
tested= X
        stocked= X[-c((m-p_order+1):m), ] # initial series Xt
        # The goal is to bind the series Xt, Xt-1, ... , Xt-p side by side,
        # giving a dataset with (m-p) rows and n*(p+1) columns (Xt plus its p lags)
for (i in c(1:p_order)){
tested_copy= tested[-c(1:i), ]
ajout_p_col= tested_copy[-c((m-p_order+1):m), ]
stocked= cbind(ajout_p_col, stocked)
}
return(stocked)
}
}
# # Test
# p_order= 4
# test= transformation_Xt_varp(data_matrix, p_order)
# head(test)
# In[10]:
phi_zero_compute= function(X, p_order){
    # Returns the intercept vector phi0
n= ncol(X)
m= nrow(X)
estimation_good_var= estimation_auto(X= X, p= p_order, num= n)
para= estimation_good_var$par
phi_zero= para[1:n]
return (phi_zero)
}
phi_transforma= function(X, p_order){
    # Assembles all the phi matrices needed for the VAR(p) -> VAR(1) transformation
n= ncol(X)
m= nrow(X)
estimation_good_var= estimation_auto(X= X, p= p_order, num= n)
para= estimation_good_var$par
    # For a VAR of order 1 no transformation is needed:
    # we simply return the coefficient matrix
if (p_order == 1){
phi_uno= matrix(para[(n+1):length(para)],n,n)
return (phi_uno)
}
else {
        # Assemble the coefficients:
        # stack the coefficient matrices phi 1 to phi p on top of each other (rbind)
stock_phi= matrix(numeric(n*n), n, n)
for (i in 1:p_order){
phi_i= matrix(para[((n*n*i + n) -(n*n) + 1):(n*n*i + n)], n ,n)
stock_phi= rbind(stock_phi, phi_i)
}
stock_phi= stock_phi[-c(1:n), ]
        # Fill in the remaining blocks so the coefficient matrix matches the new dataset
        # produced by transformation_Xt_varp
        # The coefficient matrix has dimension (n*p)x(n*p)
dim_n_p= n*p_order
        identity_mat= diag(dim_n_p-n) # identity blocks so that the product X . phi reproduces
        # Xt-1 = Xt-1 , Xt-2 = Xt-2, ...
zero_mat_ligne= matrix(numeric((dim_n_p - n)*n), n, (dim_n_p - n))
stock_phi= cbind(stock_phi, rbind(identity_mat, zero_mat_ligne))
return (stock_phi)
}
}
phi_zero_transforma= function(X, p_order){
    # Redefines the intercept matrix phi0 to match the dataset built by transformation_Xt_varp
    # X is the dataset (dimension m*n)
    # p_order is the order of the VAR
    # Special case: for a VAR(1) no transformation is needed
if (p_order == 1){
return (phi_zero_compute(X, 1))
}
else {
phi_zero= phi_zero_compute(X, p_order)
phi_matrice= phi_transforma(X, p_order)
diff_dim_col= ncol(phi_matrice) - length(phi_zero)
        # Pad the matrix with zeros on the right
zero_comble= matrix(numeric((diff_dim_col*nrow(phi_matrice)))
, nrow(phi_matrice), diff_dim_col)
phi_zero_new= cbind(matrix(phi_zero, nrow(phi_matrice), length(phi_zero), byrow= T)
, zero_comble)
return (phi_zero_new)
}
}
# # test
# p_order= 2
# phi_transforma(data_matrix, p_order)
# In[11]:
error_transformation= function(X, p_order, stock_phi, phi_zero){
    # Computes the prediction errors, taking into account
    # the structure of the dataset used for a VAR(p)
    n= ncol(X) # use the dataset passed as argument, not the global data_matrix
    m= nrow(X)
dim_n_p= n*p_order
if (p_order == 1){
calcul_value= X[1:(m-1), ]%*%stock_phi + matrix(phi_zero, (m-1), n, byrow= T)
errors= X[-1, ] - calcul_value
return (errors)
}
else {
test= transformation_Xt_varp(X, p_order)
        # drop the first n columns, which are the period-t values (not t-1)
train= test[, -c(1:n)]
        # the last n columns are removed so the true values run from t down to t-p+1
true_vals= test[, -c((dim_n_p + 1):(dim_n_p + n))]
calcul_value= train%*%stock_phi + matrix(phi_zero[1, ]
, nrow(train), ncol(phi_zero), byrow= T)
        # compute the error
errors= true_vals - calcul_value
return (errors)
}
}
# # test
# p_order= 1
# stock_test_phi= phi_transforma(data_matrix, p_order)
# phi_zero= phi_zero_transforma(data_matrix, p_order)
# head(error_transformation(data_matrix, p_order, stock_test_phi, phi_zero))
# In[12]:
compute_choleski_p= function(X, error, p_order){
    # Returns the lower-triangular matrix from the Cholesky factorisation of the error covariance
    # X is the dataset
    # error is the error matrix
    # p_order is the order of the VAR
n= ncol(X)
if (p_order == 1){
sigma= var(error)
} else {
        error_resized= error[, c(1:n)] # keep only the errors of the Xt block (first n columns)
sigma= var(error_resized)
}
p= t(chol(sigma))
return (p)
}
# In[13]:
irf_compute= function(X, p_order, phi_matrix, horizon, vecteur_choc, p){
    # Computes the response to a shock
    # We only use the first phi matrix to compute the shock,
    # since the shock occurs in a single period
IRF= c()
n= ncol(X)
e= vecteur_choc
    # Special case of a VAR(1)
if (p_order == 1){
        # Note: ^ is element-wise in R, so we build the matrix power phi^i
        # by accumulating matrix products
        phi= diag(nrow(phi_matrix))
        for (i in 1:horizon){
            phi= phi%*%phi_matrix
            temp= phi%*%p%*%e
            IRF= cbind(IRF, temp)
        }
} else {
        # Extract the phi 1 block from the companion matrix
        new_phi= phi_matrix[, c(1:n)]
        new_phi_first= new_phi[c(1:n), c(1:n)]
        # Accumulate the matrix power phi1^i (^ alone would be element-wise)
        phi= diag(n)
        for (i in 1:horizon){
            phi= phi%*%new_phi_first
            temp= phi%*%p%*%e
            IRF= cbind(IRF, temp)
        }
}
return (IRF)
}
# test
# horizon= 4
# e= c(0, -0.05, 0)
# p_mat= compute_choleski_p(data_matrix, computed_error, p_order)
# irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# In[14]:
plot_irf= function(X, IRF){
    # Plots our IRFs
    # X is our dataset
    # IRF is the n x horizon matrix of responses for
    # each series (n) over the different periods (horizon)
    n= ncol(X) # use the dataset passed as argument
    # If the number of series is odd, add an empty slot to the layout
if ((n %% 2) == 0){
layout(matrix(1:n, n/2, n/2))
} else {
n_1= n + 1
layout(matrix(1:n_1, n_1/2, n_1/2))
}
    for (i in 1:n){
plot(IRF[i,], main= colnames(X)[i], ylim= range(0, IRF)
, col= "red", pch= "+", cex= 1.5, ylab= "IRF", xlab= "Horizon")
grid(col= "grey")
lines(IRF[i,]*0, lty= 1, col= "black", lwd= 2)
}
}
# # test
# fig(10, 10)
# plot_irf(data_matrix, irf_calculated)
# Before starting the shock analysis, it is important to note that we did not manage to build confidence intervals.
# A confidence interval is what gives credibility to a shock: is it statistically different from zero?
#
# We tried to set up a **block bootstrapping** method (bootstrapping for time series, which builds samples from blocks of 4 or 5 consecutive observations of the original series), but it proved unworkable because the computation time was far too long. Our attempt can nevertheless be found in the Appendix.
#
# Finally, to analyse the impact of a shock we look at the sign of the response, shown on the y-axis. In our case, we will see that every negative shock generates negative responses.
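# For reference, the bands we would have liked to draw are the usual percentile-bootstrap bands (a textbook construction, not something our code produces): writing $IRF^{*}_{b}(h)$ for the response at horizon $h$ computed on the $b$-th block-bootstrapped sample, $b = 1, \dots, B$,
#
# $$CI_{95\%}(h) = \left[\; q_{2.5\%}\big(IRF^{*}_{1..B}(h)\big)\;,\; q_{97.5\%}\big(IRF^{*}_{1..B}(h)\big)\;\right]$$
#
# i.e. the empirical 2.5% and 97.5% quantiles of the bootstrapped responses at each horizon.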
# In[15]:
# Full IRF computation for the chosen order (here 1) and horizon 4
p_order= 1
horizon= 4
e= c(-0.08, 0, 0)
# Computes the phi matrices, from order 1 to p, used for the VAR(p) -> VAR(1) transformation
stock_test_phi= phi_transforma(data_matrix, p_order)
# Computes and resizes the phi0 vector to fit the VAR(p) -> VAR(1) transformation
phi_zero= phi_zero_transforma(data_matrix, p_order)
# Computes the errors in the VAR(p) case (also works for a VAR(1))
computed_error= error_transformation(data_matrix, p_order, stock_test_phi, phi_zero)
# Computes the matrix P that orthogonalises the system
p_mat= compute_choleski_p(data_matrix, computed_error, p_order)
# Computes the shocks up to the chosen horizon
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# Plot the IRF we just computed
fig(10, 10)
print("Choc de -8% dans l'économie chinoise")
plot_irf(data_matrix, irf_calculated)
# The first shock we simulate is negative growth of 8% in China. China is the world's factory, and the current COVID crisis makes it plain that if China stops its productive apparatus, the world stops consuming. Seeing the impact of such a shock on other countries, especially developed ones, is therefore of particular interest.
#
# In our case, this negative shock on China does not seem to fade over the 4 periods we consider. Careful though: the confidence intervals may be very wide, in which case the shock could die out after one period; we cannot tell. What is certain is that the shock strongly affects China (of course!) but also the United States and the Euro Zone.
#
# For the United States, the shock is negative and constant over the 4 periods we consider (one year), and we observe the same pattern for the Euro Zone. To be honest, it looks as if the Chinese growth shock durably changes the growth equilibrium of the Euro Zone and the United States.
#
# That is what we tried to check by computing the shock over 24 periods: for the United States the shock does seem to durably alter the structure of the economy, with a negative and constant response. This is not the case for the Euro Zone. Even China struggles to recover from the shock.
#
# Again, we did not draw confidence intervals, so we cannot assess the reliability of these results. For the United States the interval might even be so wide that the response is never different from zero!
#
# In any case, we clearly observe the interconnection of economies and the transmission of shocks from one country to the others.
# In[16]:
horizon= 24
e= c(-0.08, 0, 0)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# Plot the IRF we just computed
fig(10, 10)
print("Choc de -8% dans l'économie chinoise sur 24 périodes")
plot_irf(data_matrix, irf_calculated)
# In[17]:
horizon= 4
e= c(0, -0.05, 0)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# Plot the IRF we just computed
fig(10, 10)
print("Choc de -5% dans l'économie américaine")
plot_irf(data_matrix, irf_calculated)
# Here is a negative 5% shock to US growth. As expected, the shock affects the US economy for one year. And when we look at its propagation over 24 quarters, the impact does not disappear over time. This is probably because, for a developed economy, a -5% shock is enormous and must have deeply changed the structure of the US productive apparatus. This may seem puzzling given that the US economy is very liberal (its labour market, for instance, is far more flexible than France's), which should allow it to recover faster from an economic crisis such as a large negative growth shock. Either the US economy really does struggle to absorb such a large shock, or our model is mis-specified, in which case we should either increase the VAR order or, when building the response functions, place the US series in a different position (it is currently second).
#
# For China, however, the shock only seems to hit the first period negatively; for all other periods the response is zero, as can also be seen on the 24-period plot below.
#
# For the Euro Zone economy we observe the same pattern as for a shock coming from China: a negative peak in the second period (probably significant), then convergence towards 0. The Euro Zone receives the full impact of the shock with a one-quarter delay relative to the two other economies, and the shock dies out after about 10 quarters.
# In[18]:
horizon= 24
e= c(0, -0.05, 0)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# Plot the IRF we just computed
fig(10, 10)
print("Choc de -5% dans l'économie américaine sur 24 périodes")
plot_irf(data_matrix, irf_calculated)
# In[19]:
horizon= 4
e= c(0, 0, -0.05)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# Plot the IRF we just computed
fig(10, 10)
print("Choc de -5% dans l'économie de la Zone Euro")
plot_irf(data_matrix, irf_calculated)
# Finally, here is the impact of a -5% shock on the Euro Zone economy. First, for the Euro Zone itself the shock is diffuse over time: as the quarters go by it weighs less and less on the Euro Zone economy, and around the 15th quarter its impact is nil.
#
# As for how the Chinese economy receives this shock, the response is negative and disappears after one period (3 periods for the United States).
#
# This shock is quickly absorbed by the Euro Zone as well as by the two other economies.
# In[20]:
horizon= 24
e= c(0, 0, -0.05)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# Plot the IRF we just computed
fig(10, 10)
print("Choc de -5% dans l'économie américaine")
plot_irf(data_matrix, irf_calculated)
# <a id= "4"></a>
#
# ### 4). Why is it important to estimate these second-round effects today?
# What a strange era we live in! We can buy an item made in China while watching an American series, tucked up in bed in Paris. In this strange era, the world's economies are interconnected: a disaster in China necessarily affects every other economy, since China is both the world's factory and a major trading partner of many countries, developed or not. The same holds for the United States and the Euro Zone.
#
#
# Take a Keynesian stimulus as an example: the Mauroy plan of 1981. It can be summed up as a wage increase in France (minimum wage, civil servants). It ended in failure, with larger budget and external deficits, because this isolated policy ignored the fact that the extra income would buy goods produced abroad (the famous Japanese cassette player) rather than in France. If we analysed the demand shock of the Mauroy policy, we might well find a positive shock on the Japanese growth rate. It is very similar to the butterfly-wings theory.
#
# This is why building generalised response functions is necessary and so interesting for economists, and why we wanted to know whether a deep recession in one country can affect another country's economy, and for how long. The response functions raise several points. First, a recession in China seems to affect the US growth rate for more than 24 quarters (a constant negative response). China being a major trading partner must explain the strong impact of a Chinese recession.
# The Euro Zone recovers much more easily, which is surprising because, as for the United States, China is the Euro Zone's second-largest export market. Of course, countries are not linked by exports and imports alone; migration of people and of capital could also explain this difference. Or perhaps it is Uncle Sam's response that is aberrant: over 24 periods the Euro Zone's response vanishes after about 10 periods, which seems rather coherent. Moreover, we know from experience that the US labour market is much more flexible than in some European countries; unemployment was close to 10% in 2008 against 4% two years later. This lets the US weather the storm and keep its firms alive longer, and it is one of its levers for a quick recovery. A structural change of the US economy following a Chinese crisis therefore seems unlikely: the confidence interval we did not draw would most probably nullify the shock beyond 10 periods, just as in Europe.
#
# Second, we simulated a -5% shock to US growth and observed the same problem as with the Chinese shock: the response persists over every period we plot (24 quarters). China's response is interesting this time, because the shock only hits the first quarter negatively and then disappears. For the Euro Zone the shock is negative for about 10 periods.
#
# This negative Chinese response to the US shock looks similar to the response to a shock originating in the Euro Zone. Since Uncle Sam and the Euro Zone are privileged markets for China, a negative response is not surprising; but the fact that it disappears after one quarter suggests that China quickly dissipates the echo of the shock, either because it can export its goods elsewhere or because the countries in crisis cannot do without Chinese goods and capital.
#
# To conclude, the current situation seems quite different from these simulations, since the health crisis is a shock that hits every country, but with a delay: China first, Europe second, then the United States. On top of the shock itself we also suffer its echoes (the responses): the crisis in China hit French tourism and the luxury sector, so well before being hit head-on ourselves we had already tasted the crisis through China. That was France's response to the shock of the crisis in the Middle Kingdom. But France is also hit directly by the health crisis (remote work, short-time work, lockdown), which in our view makes it hard to model the crisis and its echoes with a VAR model and its response functions.
# <a id= "a"></a>
#
# ### Appendix
#
# Here is our attempt at building confidence intervals for the generalised response functions using block bootstrapping.
# In[21]:
# Let us compute confidence intervals for the IRFs
# using a block bootstrap method:
# draw 95% of the rows of our matrix at random (in blocks),
# compute the coefficients and the centred errors, recompute the IRFs,
# and keep, for each period and each country, the max and the min
block_bootstrap= function(X, ic){
    # Generates one block-bootstrapped sample.
    # Block bootstrapping is bootstrapping for time series: it resamples
    # blocks of neighbouring rows.
    # With a classic bootstrap the series would lose its time ordering and its autocorrelation (intertemporal links)
    # X is our dataset
    # ic is the share of the dataset's rows used
    # to generate the bootstrapped sample
m= nrow(X)
n= ncol(X)
num= floor(m*ic/100)
    # block-bootstrap the series
stocked_data= matrix(0, 1, n, byrow= T)
for (i in 1:num){
        # Pick a random row, take the 2 rows before and the 2 rows after it,
        # and append the block to the sample
        random_id= floor(runif(1, 3, m - 2)) # start at 3 because we take 2 rows before,
        # and stop at m - 2 because we take 2 rows
        # after the randomly selected id
before_1= random_id -2
before= random_id - 1
after= random_id + 1
after_1= random_id + 2
select_val= c(before_1, before, random_id, after, after_1)
data_sample= X[select_val, ]
stocked_data= rbind(stocked_data, data_sample)
}
    stocked_data= stocked_data[-1, ] # drop the placeholder first row
return (stocked_data)
}
head(block_bootstrap(data_matrix, 95))
# In[22]:
# p_order= 2
# horizon= 4
# e= c(-0.08, 0, 0)
# # generate the bootstrapped dataset
# data_bootstraped= block_bootstrap(data_matrix, 95)
# # Computes the phi matrices, from order 1 to p, for the VAR(p) -> VAR(1) transformation
# stock_test_phi= phi_transforma(data_bootstraped, p_order)
# # Computes and resizes the phi0 vector for the VAR(p) -> VAR(1) transformation
# phi_zero= phi_zero_transforma(data_bootstraped, p_order)
# # Computes the errors in the VAR(p) case (also works for a VAR(1))
# computed_error= error_transformation(data_bootstraped, p_order, stock_test_phi, phi_zero)
# ############################################################
# # centre the errors
# mean_error= t(as.matrix(colMeans(computed_error)))
# mean_error_resized= matrix(mean_error, nrow(computed_error), ncol(mean_error), byrow= T)
# centr_error= computed_error - mean_error_resized
# centr_error_shuffle= centr_error[sample(nrow(centr_error)), ]
# n= ncol(data_bootstraped)
# propre_error= centr_error_shuffle[, c(1:n)]
# # add the centred errors to our sample
# prop_data_boots= data_bootstraped[-c(1:p_order), ] + propre_error
# #############################################################
# # Then, starting from the prop_data_boots dataset
|
[
"[email protected]"
] | |
14c8a651775fd01a8b08f03a40c76e197beaee1c
|
0118285a9feed3693f36659ac7646ab38b931f54
|
/satapp/wsgi.py
|
453bb017b0b3e60101701ba05b00cbd197164e0f
|
[] |
no_license
|
jdobes/openshiftplayground
|
6a4513ec10db2d74b06fcbe0c1270149e67be17a
|
5ec5bb85bca0e8d4b4e8816cff66bbd249e9c5ae
|
refs/heads/master
| 2021-08-29T23:52:25.929976 | 2017-12-15T10:20:26 | 2017-12-15T10:20:26 | 111,908,558 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,079 |
py
|
import ujson
import falcon
import errata
DB_HOSTNAME = "postgresql-slave"
EXAMPLE_MSG = "Example:\ncurl http://<FQDN>/errata?pkg=<nvrea>\n"
cursor = errata.init_db(errata.DEFAULT_DB_NAME, errata.DEFAULT_DB_USER, errata.DEFAULT_DB_PASSWORD,
DB_HOSTNAME, errata.DEFAULT_DB_PORT)
class Test(object):
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = "%s" % EXAMPLE_MSG
class Errata(object):
def on_get(self, req, resp):
parameters = req.params
if not "pkg" in parameters:
resp.status = falcon.HTTP_400
resp.body = "Package not specified.\n%s" % EXAMPLE_MSG
elif isinstance(parameters["pkg"], list):
resp.status = falcon.HTTP_400
resp.body = "Multiple packages specified.\n%s" % EXAMPLE_MSG
else:
resp.status = falcon.HTTP_200
resp.body = ujson.dumps(errata.process(parameters["pkg"], cursor))
application = falcon.API()
application.add_route('/test', Test())
application.add_route('/errata', Errata())
|
[
"[email protected]"
] | |
8ce03b3371e6d779882789bcd8cb8042ee53e469
|
0c60cb4fdb2c6ad2992dc744d74218b0c2d5bb31
|
/3/1.py
|
38021d74e97611ea18ef3a4596068498e6378b8a
|
[] |
no_license
|
vojtechcima/aoc2016
|
8b59ff1c28b23a7f8a358f6980325564e4cf820f
|
a9ff55f0a365a3a2cc5dd818eb8640d775950b74
|
refs/heads/master
| 2020-06-15T13:42:01.592624 | 2016-12-08T09:35:55 | 2016-12-08T09:35:55 | 75,288,226 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 503 |
py
|
def read_lines(path):
with open(path, "r") as f:
lines = f.readlines()
return lines
def parse_line(line, delimeter=" "):
return [int(x.strip()) for x in line.split(delimeter) if x]
def triangle(l):
print l
for i in range(3):
if l[i % 3] + l[(i + 1) % 3] <= l[(i + 2) % 3]:
return False
return True
lines = read_lines("input.txt")
counter = 0
for l in lines:
if triangle(parse_line(l)):
counter += 1
result = counter
print result
|
[
"[email protected]"
] | |
ba11745933fa5c61976989834c195771c5305183
|
0aa5187e4bfa91434ac8446aced2763faac0d3b9
|
/numerical_analysis.py
|
c55bc690f55acc5d99b5d9512e011575833495de
|
[] |
no_license
|
nadavpo/real_fake_im_classifier
|
889879ef26e74fe686ade52372b7697cb41c732c
|
597bad2b3699fad8c629c6217db68a390d0f6adb
|
refs/heads/main
| 2023-09-04T22:43:34.230129 | 2021-11-17T18:31:07 | 2021-11-17T18:31:07 | 428,213,391 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,097 |
py
|
"""Plot ROC and DET curves."""
import os
import argparse
import torch
import scipy.stats as sp
import matplotlib.pyplot as plt
from sklearn import metrics
from torch.utils.data import DataLoader
from common import FIGURES_DIR
from utils import load_dataset, load_model
device = "cpu"#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Arguments
def parse_args():
"""Parse script arguments.
Returns:
Namespace with model name, checkpoint path and dataset name.
"""
parser = argparse.ArgumentParser(description='Analyze network performance.')
parser.add_argument('--model', '-m',
default='XceptionBased', type=str,
help='Model name: SimpleNet or XceptionBased.') # default='XceptionBased'
parser.add_argument('--checkpoint_path', '-cpp',
default='checkpoints/synthetic_dataset_XceptionBased_Adam.pt', type=str,
help='Path to model checkpoint.') # default='checkpoints/XceptionBased.pt'
parser.add_argument('--dataset', '-d',
default='synthetic_dataset', type=str,
help='Dataset: fakes_dataset or synthetic_dataset.')
return parser.parse_args()
def get_soft_scores_and_true_labels(dataset, model):
"""Return the soft scores and ground truth labels for the dataset.
Loop through the dataset (in batches), log the model's soft scores for
all samples in two iterables: all_first_soft_scores and
all_second_soft_scores. Log the corresponding ground truth labels in
gt_labels.
Args:
dataset: the test dataset to scan.
model: the model used to compute the prediction.
Returns:
(all_first_soft_scores, all_second_soft_scores, gt_labels):
all_first_soft_scores: an iterable holding the model's first
inference result on the images in the dataset (data in index = 0).
all_second_soft_scores: an iterable holding the model's second
inference result on the images in the dataset (data in index = 1).
gt_labels: an iterable holding the samples' ground truth labels.
"""
test_dataloader = DataLoader(dataset,32,shuffle=True)
model = model.to(device=device)
all_first_soft_scores = []
all_second_soft_scores = []
gt_labels = []
for batch_idx, (inputs, targets) in enumerate(test_dataloader):
inputs = inputs.to(device)
targets = targets.to(device)
with torch.no_grad():
scores = model(inputs)
all_first_soft_scores = all_first_soft_scores + scores[:,0].tolist()
all_second_soft_scores = all_second_soft_scores + scores[:, 1].tolist()
gt_labels = gt_labels + targets.tolist()
return all_first_soft_scores, all_second_soft_scores, gt_labels
def plot_roc_curve(roc_curve_figure,
all_first_soft_scores,
all_second_soft_scores,
gt_labels):
"""Plot a ROC curve for the two scores on the given figure.
Args:
roc_curve_figure: the figure to plot on.
all_first_soft_scores: iterable of soft scores.
all_second_soft_scores: iterable of soft scores.
gt_labels: ground truth labels.
Returns:
        roc_curve_figure: the figure with the ROC curves plotted on it.
"""
fpr, tpr, _ = metrics.roc_curve(gt_labels, all_first_soft_scores)
plt.plot(fpr, tpr)
fpr, tpr, _ = metrics.roc_curve(gt_labels, all_second_soft_scores)
plt.plot(fpr, tpr)
plt.grid(True)
plt.xlabel('False Positive Rate (Positive label: 1)')
plt.ylabel('True Positive Rate (Positive label: 1)')
plt.title(f'ROC curves AuC Score for the first score: '
f'{metrics.roc_auc_score(gt_labels, all_first_soft_scores):.3f}, '
f'AuC second score: '
f'{metrics.roc_auc_score(gt_labels, all_second_soft_scores):.3f}')
plt.legend(['first score', 'second score'])
roc_curve_figure.set_size_inches((8, 8))
return roc_curve_figure
def plot_det_curve(det_curve_figure,
all_first_soft_scores,
all_second_soft_scores,
gt_labels):
"""Plot a DET curve for the two scores on the given figure.
Args:
det_curve_figure: the figure to plot on.
all_first_soft_scores: iterable of soft scores.
all_second_soft_scores: iterable of soft scores.
gt_labels: ground truth labels.
Returns:
        det_curve_figure: the figure with the DET curves plotted on it.
"""
fpr, fnr, _ = metrics.det_curve(gt_labels, all_first_soft_scores)
plt.plot(sp.norm.ppf(fpr), sp.norm.ppf(fnr))
fpr, fnr, _ = metrics.det_curve(gt_labels, all_second_soft_scores)
plt.plot(sp.norm.ppf(fpr), sp.norm.ppf(fnr))
plt.grid(True)
plt.xlabel('False Positive Rate (Positive label: 1)')
plt.ylabel('False Negative Rate (Positive label: 1)')
plt.title('DET curve for the first score')
axes = det_curve_figure.gca()
ticks = [0.001, 0.01, 0.05, 0.20, 0.5, 0.80, 0.95, 0.99, 0.999]
tick_labels = [
'{:.0%}'.format(s) if (100 * s).is_integer() else '{:.1%}'.format(s)
for s in ticks
]
tick_locations = sp.norm.ppf(ticks)
axes.set_xticks(tick_locations)
axes.set_xticklabels(tick_labels)
axes.set_yticks(tick_locations)
axes.set_yticklabels(tick_labels)
axes.set_ylim(-3, 3)
plt.legend(['first score', 'second score'])
det_curve_figure.set_size_inches((8, 8))
return det_curve_figure
def main():
"""Parse script arguments, log all the model's soft scores on the dataset
images and the true labels. Use the soft scores and true labels to
generate ROC and DET graphs."""
args = parse_args()
# load model
model_name = args.model
model = load_model(model_name)
model.load_state_dict(torch.load(args.checkpoint_path)['model'])
model.eval()
# load dataset
test_dataset = load_dataset(dataset_name=args.dataset, dataset_part='test')
all_first_soft_scores, all_second_soft_scores, gt_labels = \
get_soft_scores_and_true_labels(test_dataset, model)
# plot the roc curves
roc_curve_figure = plt.figure()
roc_curve_figure = plot_roc_curve(roc_curve_figure,
all_first_soft_scores,
all_second_soft_scores,
gt_labels)
roc_curve_figure.savefig(
os.path.join(FIGURES_DIR,
f'{args.dataset}_{args.model}_roc_curve.png'))
# plot the det curve for the scores of the first output of the network
det_curve_figure = plt.figure()
det_curve_figure = plot_det_curve(det_curve_figure,
all_first_soft_scores,
all_second_soft_scores,
gt_labels)
det_curve_figure.savefig(
os.path.join(FIGURES_DIR,
f'{args.dataset}_{args.model}_det_curve.png'))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
b4d01dd3705d74d25a15957865fcbc913580986c
|
36afa271f080459adf1014cd23f4be9f954dfee6
|
/Crawler/Course/第八章:scrapy框架/sunPro/sunPro/spiders/sun.py
|
35ab678e80afc0bf5d06d12f11a75a5455738471
|
[] |
no_license
|
King-Of-Game/Python
|
b69186a7574ce1c0b7097207cfe9a2eb38a90bc0
|
643b9fd22efd78f6679735f23432943a57b5f5bb
|
refs/heads/master
| 2023-05-25T05:35:14.473114 | 2021-10-24T12:52:21 | 2021-10-24T12:52:21 | 151,251,434 | 3 | 0 | null | 2023-05-01T20:51:50 | 2018-10-02T12:34:04 |
HTML
|
UTF-8
|
Python
| false | false | 2,909 |
py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from sunPro.items import SunproItem
from sunPro.items import DetailItem
# Goal: crawl each novel's category, title, popularity and synopsis
class SunSpider(CrawlSpider):
name = 'sun'
# allowed_domains = ['www.xxx.com']
start_urls = ['https://www.69shu.org/fenlei/1_1/']
    # Link extractor: extracts links that match the given rule (allow= regex)
link_extractor = LinkExtractor(allow=r'fenlei/1_(?!16|\d{3,})')
link_detail_extractor = LinkExtractor(allow=r'/book/\d+/(?!\d+\.html)') # /book/\d+/(?!\d+\.html)
rules = (
        # Rule parser: parses the links found by the link extractor with the given callback
        # follow=True would keep applying the link extractor to the pages those links point to
Rule(link_extractor, callback='parse_novel_name', follow=False),
Rule(link_detail_extractor, callback='parse_novel_detail', follow=False),
)
'''
    The two parse methods below do not issue requests manually, so they cannot pass data between
    requests (i.e. they cannot call other callbacks via yield scrapy.Request()).
    Their parsed data therefore cannot be stored in a single item; it is stored in two items instead.
'''
    # Parse the novel category, title and author
def parse_novel_name(self, response):
# item = {}
# #item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
# #item['name'] = response.xpath('//div[su@id="name"]').get()
# #item['description'] = response.xpath('//div[@id="description"]').get()
# return item
print('\n', response)
        # Note: the tbody tag must not appear in XPath expressions
li_list = response.xpath('/html/body/div[3]/div/div/div[2]/div[1]/div[2]/ul/li')
for li in li_list:
novel_category = li.xpath('./span[1]/text()').extract_first()
novel_name = li.xpath('./span[2]/a/text()').extract_first()
novel_author = li.xpath('./span[4]/text()').extract_first()
# print(novel_category, novel_name, novel_author)
item = SunproItem()
item['novel_category'] = novel_category
item['novel_name'] = novel_name
item['novel_author'] = novel_author
yield item
    # Parse the novel popularity and synopsis
def parse_novel_detail(self, response):
# print(response)
novel_popularity = response.xpath('//*[@id="info"]/p/span/text()').extract_first()
novel_synopsis = response.xpath('//*[@id="info"]/div[3]//text()').extract()
novel_synopsis = ''.join(novel_synopsis)
# print(novel_popularity)
item = DetailItem()
item['novel_popularity'] = novel_popularity
item['novel_synopsis'] = novel_synopsis
yield item
|
[
"[email protected]"
] | |
60fda27af18dd2cb7d19ba6fa1c357da127acf69
|
45f1501f28d71510237b51f66a215cdc7779cdab
|
/klinik/klinik/settings.py
|
07f4c10a354037ca33dfe332af832827230cb88a
|
[] |
no_license
|
Aldodev01/Django-Crud
|
809def1faa51679f46315080df9a4851f37c3830
|
9ed4844e85a09eba3db464e1c8064496dc195ef1
|
refs/heads/master
| 2023-03-10T02:20:53.491501 | 2021-02-28T02:34:46 | 2021-02-28T02:34:46 | 343,004,618 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,269 |
py
|
"""
Django settings for klinik project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
#import db
import pymysql
pymysql.version_info=(1,3,13,"final",0)
pymysql.install_as_MySQLdb()
#config db
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't@yx%2@gr@59^i8vm@^v^k+6oksa4$8k=blqnw&*1rg(uj9rst'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Pasien'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'klinik.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'klinik.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'klinik_django',
        'USER': 'root',  # Django expects upper-case keys (USER/PASSWORD/HOST/PORT)
        'PASSWORD': '',
        'HOST': '127.0.0.1',
        'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
d17f6b1a279af3be2139adc5998d5a7d115792ec
|
ea659b7456912bd1d05c5e0c051674d3eafa2af5
|
/cmproject/settings.py
|
cc4cde58f9cf122938e507b07048da1a689fd6d9
|
[] |
no_license
|
pablomonteiro/cmproject_horaextra
|
e78dcfba04ee9a5d16ef5cca888748e6da63d3cb
|
769be04e4c616d12044b61ada279552867138005
|
refs/heads/master
| 2021-02-10T21:10:20.975535 | 2020-03-02T19:02:48 | 2020-03-02T19:02:48 | 244,419,726 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,144 |
py
|
"""
Django settings for cmproject project.
Generated by 'django-admin startproject' using Django 1.11.27.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r&h4(a*g40fkofy*c4dqqyk(utpyx0#@n58=0&c6ts%a)koqml'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'horaextra.apps.HoraextraConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cmproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cmproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
be2d4b941c14faa0176be8ec0a90f175fe943bde
|
a93ce0dbb557f9e3a0218fe22bf80d7fe42c84a2
|
/deepgmm/methods/mnist_xz_model_selection_method.py
|
0f171d6592144cf9f0978b2cea0468604d1856c0
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/AdversarialGMM
|
9d31993e7907937a80a0bef721c4f879a1009904
|
691fbe0761ee5b40dbc4e38317946a7193186af2
|
refs/heads/main
| 2023-06-24T00:22:55.120222 | 2023-06-12T18:16:19 | 2023-06-12T18:16:19 | 306,104,666 | 30 | 18 |
NOASSERTION
| 2023-06-12T18:16:20 | 2020-10-21T17:49:34 |
Python
|
UTF-8
|
Python
| false | false | 4,975 |
py
|
import torch
import torch.nn as nn
from torch.optim import Adam
from game_objectives.simple_moment_objective import OptimalMomentObjective
from learning.learning_dev_f import GradientDescentLearningDevF, SGDLearningDevF
from methods.abstract_method import AbstractMethod
from model_selection.f_history_model_selection_v2 import \
FHistoryModelSelectionV2
from model_selection.f_history_model_selection_v3 import \
FHistoryModelSelectionV3
from model_selection.learning_eval import FHistoryLearningEvalGradientDecent, \
FHistoryLearningEvalSGD
from model_selection.learning_eval_nostop import FHistoryLearningEvalSGDNoStop
from model_selection.simple_model_eval import GradientDecentSimpleModelEval, \
SGDSimpleModelEval
from models.cnn_models import LeakySoftmaxCNN, DefaultCNN, SimpleCNNModel, OtherCNN, OtherCNNV2, OtherCNNV3
from models.mlp_model import MLPModel
from optimizers.oadam import OAdam
from optimizers.optimizer_factory import OptimizerFactory
class MNISTXZModelSelectionMethod(AbstractMethod):
def __init__(self, enable_cuda=False):
AbstractMethod.__init__(self)
self.g = None
self.f = None
self.dev_f_collection = None
g_models = [
DefaultCNN(cuda=enable_cuda),
#OtherCNN(cuda=enable_cuda),
#OtherCNNV2(cuda=enable_cuda),
#OtherCNNV3(cuda=enable_cuda),
]
f_models = [
DefaultCNN(cuda=enable_cuda),
#OtherCNN(cuda=enable_cuda),
#OtherCNNV2(cuda=enable_cuda),
#OtherCNNV3(cuda=enable_cuda),
]
g_learning_rates = [5e-6, 2e-6, 1e-6]
# g_learning_rates = [0.00001]
game_objective = OptimalMomentObjective()
# g_learning_rates = [0.0005]
# game_objectives = [OptimalMomentObjective(lambda_1=0.5)]
learning_setups = []
for g_lr in g_learning_rates:
learning_setup = {
"g_optimizer_factory": OptimizerFactory(
OAdam, lr=g_lr, betas=(0.5, 0.9)),
"f_optimizer_factory": OptimizerFactory(
OAdam, lr=5.0*g_lr, betas=(0.5, 0.9)),
"game_objective": game_objective
}
learning_setups.append(learning_setup)
default_g_opt_factory = OptimizerFactory(
Adam, lr=0.0001, betas=(0.5, 0.9))
default_f_opt_factory = OptimizerFactory(
Adam, lr=0.0001, betas=(0.5, 0.9))
g_simple_model_eval = SGDSimpleModelEval(
max_num_epoch=50, max_no_progress=10, batch_size=1024, eval_freq=1)
f_simple_model_eval = SGDSimpleModelEval(
max_num_epoch=50, max_no_progress=10, batch_size=1024, eval_freq=1)
learning_eval = FHistoryLearningEvalSGDNoStop(
num_epochs=60, eval_freq=1, batch_size=1024)
self.model_selection = FHistoryModelSelectionV3(
g_model_list=g_models,
f_model_list=f_models,
learning_args_list=learning_setups,
default_g_optimizer_factory=default_g_opt_factory,
default_f_optimizer_factory=default_f_opt_factory,
g_simple_model_eval=g_simple_model_eval,
f_simple_model_eval=f_simple_model_eval,
learning_eval=learning_eval,
psi_eval_burn_in=30, psi_eval_max_no_progress=10,
)
self.default_g_opt_factory = default_g_opt_factory
def fit(self, x_train, z_train, y_train, x_dev, z_dev, y_dev,
video_plotter=None, verbose=False, g_dev=None):
g, f, learning_args, dev_f_collection, e_dev_tilde = \
self.model_selection.do_model_selection(
x_train=x_train, z_train=z_train, y_train=y_train,
x_dev=x_dev, z_dev=z_dev, y_dev=y_dev, verbose=verbose)
self.g = g
self.f = f
self.dev_f_collection = dev_f_collection
g_optimizer = learning_args["g_optimizer_factory"](g)
f_optimizer = learning_args["f_optimizer_factory"](f)
game_objective = learning_args["game_objective"]
learner = SGDLearningDevF(
game_objective=game_objective, g=g, f=f,
g_optimizer=g_optimizer, f_optimizer=f_optimizer,
dev_f_collection=dev_f_collection, e_dev_tilde=e_dev_tilde,
final_g_optimizer_factory=self.default_g_opt_factory,
video_plotter=video_plotter, do_averaging=False, batch_size=1024,
eval_freq=1, max_num_epochs=50, max_no_progress=10, burn_in=30,
print_freq_mul=1)
learner.fit_from_tensors(x_train, y_train, z_train,
x_dev, z_dev, y_dev, w_train=x_train,
g_dev=g_dev, verbose=verbose)
def predict(self, x_test):
if self.g is None:
raise AttributeError("Trying to call 'predict' before "
"calling 'fit'")
self.g = self.g.eval()
return self.g(x_test)
|
[
"[email protected]"
] | |
30a88e9a85e870ada62701b27587c4a9ba59ba65
|
a5b43123d91d23581ae1f1cc725d7b004a4caa25
|
/python/counting rock samples.py
|
53f459ff4dba284ce0183deb39eef9b2e3a04b37
|
[] |
no_license
|
ivan570/code
|
376001416a01f0a870a0d73796f1a61dd3bfe958
|
e5a8e9bf7c9ea27b070ca3f351bb54cb16ce0317
|
refs/heads/main
| 2023-05-23T04:48:30.477060 | 2021-06-11T15:50:30 | 2021-06-11T15:50:30 | 330,686,610 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 325 |
py
|
from collections import Counter
S, R = map(int, input().split())
sample = list(map(int, input().rstrip().split()))
p = dict(Counter(sample))
for _ in range(R):
start, end = map(int, input().split())
c = 0
for i in range(start, end + 1):
if p.get(i) is not None:
c += p.get(i)
print(c)
|
[
"[email protected]"
] | |
77d3ccb4fbb606e29dc100993d9286af9143d806
|
f00767fdeed6bfa8b12f6900b9f9bd5c70786895
|
/models/base_db.py
|
b9ec16abf725b932e97446cf9463b303db180b0b
|
[] |
no_license
|
guoyu07/domain_whois_query
|
de22cb5d83db2441ba512935fd7f3ed5c158997a
|
c70b52f2b9306e4b9ead273de279cd149052623f
|
refs/heads/master
| 2020-12-07T06:24:57.907042 | 2015-11-29T00:53:31 | 2015-11-29T00:53:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 313 |
py
|
# encoding:utf-8
"""
Base class for database operations
"""
import torndb
class BaseDb(object):
def __init__(self):
self.db = torndb.Connection(
host="172.26.253.3",
database="DomainWhois",
user="root",
password="platform",
charset="utf8"
)
|
[
"[email protected]"
] | |
ea9e7a8b99cd02b1f71e0f5c2c419a055b084728
|
fe0bca3fcf363ebc465fcc370e77b55df1cfaaa7
|
/src/route/work_viewer.py
|
f79466d814c37cc4151ac1ca0217dbe9d45950dc
|
[] |
no_license
|
sihcpro/todo-list
|
66847aece556fe45223b98ecc44f04bbaaf17b55
|
1db48a63e9f4d309d57baeca691f6e85c36866a6
|
refs/heads/master
| 2022-11-17T14:34:20.316901 | 2020-07-14T10:16:18 | 2020-07-14T10:16:18 | 279,233,154 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,661 |
py
|
import calendar
from datetime import timedelta
from sqlalchemy import Date, and_, cast, or_
from .data_define import ShowWorkData
from .resource import WorkResource
def configWorkViewer(Domain):
session = Domain.session
def getValidatedDate(param):
date_data = ShowWorkData(
from_date=param["from_date"][0], to_date=param["to_date"][0],
)
if date_data.from_date > date_data.to_date:
raise ValueError("from_date must smaller than to_date")
return date_data
def getWorkInAPerius(from_date, to_date):
record = {"from_date": str(from_date), "to_date": str(to_date)}
if from_date == to_date:
works = (
session.query(WorkResource)
.filter(
or_(
cast(WorkResource.starting_date, Date) == to_date,
cast(WorkResource.ending_date, Date) == to_date,
and_(
cast(WorkResource.starting_date, Date) < to_date,
cast(WorkResource.ending_date, Date) > to_date,
),
)
)
.all()
)
else:
works = (
session.query(WorkResource)
.filter(
or_(
and_(
WorkResource.starting_date >= from_date,
WorkResource.starting_date < to_date,
),
and_(
WorkResource.ending_date >= from_date,
WorkResource.ending_date < to_date,
),
and_(
WorkResource.starting_date <= from_date,
WorkResource.ending_date >= to_date,
),
)
)
.all()
)
record["works"] = [work.asDict() for work in works]
return record
@Domain.registerQuery("show-work-by-date")
def showWorkByDate(data, identifier, param):
date_data = getValidatedDate(param)
date = date_data.from_date
results = []
while date <= date_data.to_date:
results.append(getWorkInAPerius(date, date))
date += timedelta(days=1)
return results
@Domain.registerQuery("show-work-by-week")
def showWorkByWeek(data, identifier, param):
date_data = getValidatedDate(param)
date = date_data.from_date
date = date - timedelta(days=date.weekday())
results = []
while date <= date_data.to_date:
start_date = date
end_date = date + timedelta(weeks=1) - timedelta(microseconds=1)
results.append(getWorkInAPerius(start_date, end_date))
date += timedelta(weeks=1)
return results
@Domain.registerQuery("show-work-by-month")
def showWorkByMonth(data, identifier, param):
date_data = getValidatedDate(param)
date = date_data.from_date
date = date - timedelta(days=date.day - 1)
results = []
while date <= date_data.to_date:
days_in_month = calendar.monthrange(date.year, date.month)[1]
start_date = date
end_date = (
date
+ timedelta(days=days_in_month)
- timedelta(microseconds=1)
)
results.append(getWorkInAPerius(start_date, end_date))
date += timedelta(days=days_in_month)
return results
|
[
"[email protected]"
] | |
75750e2d778d9088cc0aa9d4e0a9b23d245d0029
|
7041c85dffb757c3e7063118730363f32ebb9b8a
|
/프로젝트/20190111/open_api.py
|
af937d2499eb4c1f56272d6930b3d2c64641b4f6
|
[] |
no_license
|
woonji913/til
|
efae551baff56f3ca16169b93185a65f4d81cd7a
|
a05efc68f88f535c26cb4d4a396a1e9cd6bf0248
|
refs/heads/master
| 2021-06-06T23:17:54.504620 | 2019-06-19T04:29:18 | 2019-06-19T04:29:18 | 163,778,844 | 1 | 0 | null | 2021-05-08T16:27:17 | 2019-01-02T01:08:19 |
HTML
|
UTF-8
|
Python
| false | false | 1,240 |
py
|
import requests
from bs4 import BeautifulSoup
import csv, datetime, os
date = datetime.date(2019, 1, 13)
weeks = datetime.timedelta(7)
movies = []
check = set()
key = os.environ['KEY']
for i in range(10):
current = date - weeks * i
url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchWeeklyBoxOfficeList.json?key={key}&weekGb=0&targetDt='
url += str(current.strftime('%Y%m%d'))
res_json = requests.get(url).json()
for j in res_json['boxOfficeResult']['weeklyBoxOfficeList']:
code = j['movieCd']
name = j['movieNm']
total_aud = j['audiAcc']
if code not in check:
print(name)
movies.append({'movie_code': code, 'title': name, 'audience': total_aud, 'recorded_at': current})
check.add(code)
# movieIDDF = pd.DataFrame()
# movieIDDF = movieIDDF.append({"movieCd":" ", "movieNM": " ", "audiCnt": " ", "openDt": " "}, ignore_index = True)
# # pprint(movieIDDF)
with open('boxoffice.csv', 'w', encoding='utf-8', newline='') as f:
fieldnames = ('movie_code', 'title', 'audience', 'recorded_at')
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for movie in movies:
writer.writerow(movie)
|
[
"[email protected]"
] | |
7d31fe18877c6078bd75cf9d7badeddd503d0e55
|
0466559817d3a1be9409da2c83db99c4db3bacfe
|
/hubcheck/shell/container_manager.py
|
0d0d575c450a0cd9d608a7c9e7729ac697b061c0
|
[
"MIT"
] |
permissive
|
ken2190/hubcheck
|
955cf9b75a1ee77e28256dfd3a780cfbc17de961
|
2ff506eb56ba00f035300862f8848e4168452a17
|
refs/heads/master
| 2023-03-20T15:17:12.949715 | 2015-09-29T16:11:18 | 2015-09-29T16:11:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,451 |
py
|
from .toolsession import ToolSession
from hubcheck.exceptions import ConnectionClosedError
from hubcheck.exceptions import SessionCreateError
import logging
import pprint
import re
import hubcheck.conf
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class ContainerManager(object):
__metaclass__ = Singleton
def __init__(self):
self.logger = logging.getLogger(__name__)
self._lookup = {
# Example:
# host : {
# username : {
# 'sessionobj' : sessionObj,
# 'sessions' : [ {'number' : sessionNum, 'toolname' : toolname},
# {'number' : sessionNum, 'toolname' : toolname},
# ... ],
# }
# }
}
def __repr__(self):
return "ContainerManager(%s)" % (pprint.pformat(self._lookup))
# def __del__(self):
#
# self.stop_all()
def _find_session_number_for(self,host,username,toolname=None):
self.logger.debug(
'cm looking for session number for %s on %s with toolname %s' \
% (username,host,toolname))
self.logger.debug(
'session dictionary:\n%s' \
% (pprint.pformat(self._lookup)))
session_obj = None
session_number = None
session = None
# check if the host,user combination exists
try:
sessions = self._lookup[host][username]['sessions']
session_obj = self._lookup[host][username]['sessionobj']
except KeyError:
return session_obj,session_number
if len(sessions) == 0:
session_number = None
return session_obj,session_number
if toolname is None:
# return the first available session
session_number = sessions[0]['number']
return session_obj,session_number
# find a session that matches the toolname
for session in sessions:
if session['toolname'] == toolname:
session_number = session['number']
break
return session_obj,session_number
def _create_session_number_record(self,host,username,session_number,
session_obj,toolname):
if host not in self._lookup:
self._lookup[host] = {}
session_number = int(session_number)
if username not in self._lookup[host]:
# add a new record
self.logger.debug(
'adding cm record for %s:%s -> %s,%s' \
% (host,username,session_number,toolname))
self._lookup[host][username] = {
'sessionobj' : session_obj,
'sessions' : [{'number':session_number,'toolname':toolname}],
}
else:
# update an existing record
self.logger.debug(
'updating cm record for %s:%s -> %s,%s' \
% (host,username,session_number,toolname))
self._lookup[host][username]['sessions'].append(
{'number':session_number,'toolname':toolname})
self.logger.info(
"cm user sessions: host='%s' username='%s' sessions='%s'" \
% (host,username,self._lookup[host][username]['sessions']))
def _delete_session_number_record(self,host,username,session_number):
session_number = int(session_number)
self.logger.debug(
"removing cm session record for %s:%s -> %s" \
% (host,username,session_number))
# update an existing record
for i in xrange(0,len(self._lookup[host][username]['sessions'])):
session = self._lookup[host][username]['sessions'][i]
if session['number'] == int(session_number):
del self._lookup[host][username]['sessions'][i]
break
self.logger.info(
"cm user sessions: host='%s' username='%s' sessions='%s'" \
% (host,username,self._lookup[host][username]['sessions']))
def create(self,host,username,password,session=None,title=None,toolname=None):
self.logger.info("cm creating new session")
if session is None:
session = ToolSession(host=host,
username=username,
password=password)
# read the configuration to find the name of the default workspace
if toolname is None:
toolname = hubcheck.conf.settings.default_workspace_toolname
# create the session
i,o,e = session.create(title,toolname)
output = o.read(1024)
try:
session_number = int(re.search('(\d+)',output).group(0))
except:
msg = "Failed to locate session number: %s" % (output)
raise SessionCreateError(msg)
# enter the session
ws = session.access(session_number=session_number)
# store the session number
self._create_session_number_record(host,username,session_number,session,toolname)
return ws
def access(self,host,username,password,toolname=None):
ws = None
# FIXME:
# we should probably grab all of the open sessions
# and loop through them, trying to connect. if we
# get to the end, then we open a new session.
session,session_number = self._find_session_number_for(host,username,toolname=toolname)
if session_number is not None:
# an open session was returned
# open a shell in that session
self.logger.info("cm accessing session %s" % (session_number))
try:
ws = session.access(session_number=session_number)
except ConnectionClosedError as e:
self.logger.exception(e)
self.logger.debug("session access failed, trying to recover...")
self.logger.debug("checking if closed")
# accessing the session failed
# check if the session is closed
d = session.get_open_session_detail()
for k,v in d.items():
if int(v['session_number']) == session_number:
# session is still listed in table
# probably something wrong trying to connect to it.
self.logger.debug("session %d appears open"
% (session_number))
raise
# session was not in the table, it is probably closed
# force a fall through to the next if clause
self.logger.debug("session appears closed, open a new one")
self._delete_session_number_record(host,username,session_number)
session_number = None
if session_number is None:
# no stored open sessions for the user on this host
# create a new session and store it
ws = self.create(host,username,password,session,toolname=toolname)
return ws
def sync_open_sessions(self,host=None,username=None):
self.logger.info("sync'ing open sessions: host = %s, username = %s"
% (host,username))
for key_host in self._lookup.keys():
if (host is not None) and (key_host != host):
continue
for key_user in self._lookup[key_host].keys():
if (username is not None) and (key_user != username):
continue
# get the list of open session from the "session list" command
session = self._lookup[key_host][key_user]['sessionobj']
open_sessions_dict = session.get_open_session_detail()
open_sessions = []
open_session_data = {}
for k,v in open_sessions_dict.items():
open_sessions.append(int(v['session_number']))
toolname = re.sub('_r\d+$','',v['name'])
open_session_data[int(v['session_number'])] = toolname
# figure out which sessions cm has listed as open,
            # versus the sessions listed as open by "session list"
# closed_sessions = set(userd['sessions']) - set(open_sessions)
stored_session_data = self._lookup[key_host][key_user]['sessions']
stored_sessions = []
for session in stored_session_data:
stored_sessions.append(session['number'])
self.logger.debug("stored open sessions: %s" % (stored_sessions))
self.logger.debug("session list results: %s" % (open_sessions))
new_open_sessions = set(stored_sessions) & set(open_sessions)
# rebuild the container manager's open session data
self._lookup[key_host][key_user]['sessions'] = []
for session_number in new_open_sessions:
self._lookup[key_host][key_user]['sessions'].append(
{'number':session_number,
'toolname':open_session_data[session_number]}
)
self.logger.debug("new open sessions: %s"
% (self._lookup[key_host][key_user]['sessions']))
def stop(self,host,username,session_number):
"""
stop a session container
"""
self.logger.info("cm stopping session %s" % (session_number))
session = self._lookup[host][username]['sessionobj']
# check if the session is open
is_session_open = False
open_sessions_dict = session.get_open_session_detail()
for k,v in open_sessions_dict.items():
if int(v['session_number']) == int(session_number):
is_session_open = True
break
if is_session_open is False:
self.logger.info("session %s is not listed as open" % (session_number))
try:
self._delete_session_number_record(host,username,session_number)
except:
pass
return
i,o,e = session.stop(session_number=session_number)
output = o.read(1024)
self.logger.debug("session stop output: %s" % (output))
#FIXME:
# should probably read the output to make sure
# there were no errors
self._delete_session_number_record(host,username,session_number)
def stop_all(self):
for host in self._lookup.keys():
for user in self._lookup[host].keys():
sessions = list(self._lookup[host][user]['sessions'])
self.logger.debug('closing %s:%s\'s open sessions: %s'
% (host,user,sessions))
# stop each session
for s in sessions:
self.stop(host,user,s['number'])
# kill the session object
del self._lookup[host][user]['sessionobj']
self._lookup[host][user]['sessionobj'] = None
# delete the user record
del self._lookup[host][user]
self.clear()
def clear(self):
self._lookup = {}
|
[
"[email protected]"
] | |
68f3c986ea57a2c8867a281c046b02f3481a1037
|
3ef6eb6071c2de6c7e9139de1c8c1da09cc222dc
|
/generate_tfrecord.py
|
2890689a6b85655da46ec08e9394f0db8a8bb8c4
|
[] |
no_license
|
MoGaber/segmentation-of-planes-satellite-imagery-satellogic
|
c2e4f336bbb3829a8086b53a340065cfe8a9a311
|
06a4ef36baf08704b36e633d50219fa12df6c058
|
refs/heads/master
| 2023-04-15T19:38:39.640901 | 2021-04-24T02:06:18 | 2021-04-24T02:06:18 | 298,437,303 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,483 |
py
|
"""
Usage:
# From tensorflow/models/
# Create train data:
python3 generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=data/train.record
# Create test data:
python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=data/test.record
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
import tensorflow as tf
import sys
sys.path.insert(1, 'D:/Work/Satellogic/Yolo Work/my_guy/models/research/')
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict
flags = tf.app.flags
flags.DEFINE_string('csv_input', '', 'Path to the CSV input')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
flags.DEFINE_string('image_dir', '', 'Path to images')
FLAGS = flags.FLAGS
# TO-DO replace this with label map
def class_text_to_int(row_label):
if row_label == 'macncheese':
return 1
    else:
        return None
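    # A hypothetical multi-class version (class names here are illustrative only,
    # not this project's actual label map):
    #     label_map = {'building': 1, 'road': 2, 'water': 3}
    #     return label_map.get(row_label)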
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path):
with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(class_text_to_int(row['class']))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def main(_):
writer = tf.io.TFRecordWriter(FLAGS.output_path)
path = os.path.join(FLAGS.image_dir)
examples = pd.read_csv(FLAGS.csv_input)
grouped = split(examples, 'filename')
for group in grouped:
tf_example = create_tf_example(group, path)
writer.write(tf_example.SerializeToString())
writer.close()
output_path = os.path.join(os.getcwd(), FLAGS.output_path)
print('Successfully created the TFRecords: {}'.format(output_path))
if __name__ == '__main__':
tf.compat.v1.app.run()
|
[
"[email protected]"
] | |
3bc807fff68940a850babd595cc1a6d5f414496c
|
0f1db4874afcdcf16397dcb5a0e82ae4897efbe6
|
/longest_prefix_suffix/code.py
|
6b0dba7950c915f29d609e639ea736d0e9e11f17
|
[] |
no_license
|
anarkia7115/g4g
|
619a22af59f1f3cacfbd8db734f37e2c0870e7d0
|
0dafd0beb8c640395e905fc313736921571a9e5a
|
refs/heads/master
| 2020-03-30T16:08:27.453764 | 2019-03-07T01:44:47 | 2019-03-07T01:44:47 | 151,394,943 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 620 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
def main():
sys.stdin.readline()
# loop every line
for l in sys.stdin:
l = l.strip()
# print(l)
lps = 0 # init lps
curr_prefix = "" # init prefix
# loop every char
for ch in l[:-1]:
curr_prefix += ch
# curr prefix length
i = len(curr_prefix)
# check match suffix
if l[-i:] == curr_prefix:
lps = i # record lps
# print lps
print(lps)
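# A minimal O(n) alternative using the KMP prefix (failure) function; a sketch,
# assuming the task is the longest proper prefix of a string that is also its
# suffix, i.e. the same quantity main() computes by brute force above.
def longest_prefix_suffix(s):
    lps = [0] * len(s)      # lps[i] = longest proper prefix-suffix of s[:i+1]
    k = 0
    for i in range(1, len(s)):
        while k > 0 and s[i] != s[k]:
            k = lps[k - 1]  # fall back to the next shorter candidate prefix
        if s[i] == s[k]:
            k += 1
        lps[i] = k
    return lps[-1] if s else 0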
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
694553df0c0aa0de72c6cd3372d907b36a37b9fa
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3_neat/16_0_3_RTN8_solve.py
|
7578551770778fbca70157c20919e407da47b880
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 2,357 |
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import math
def optimal(from_, to_):
if from_ % 2 == 0:
yield from_
from_ += 1
for divider_candidate in range(from_, to_, 2):
yield divider_candidate
def get_divider(x, from_, to_):
for divider_candidate in optimal(from_, min(to_, int(math.sqrt(x)) + 1)):
if x % divider_candidate == 0:
return divider_candidate
def solve(n_and_j):
n, j = n_and_j.split(' ')
n, j = int(n), int(j)
results_candidates = []
results = []
def generate_jamcoin_candidate():
for bin_number in range(0, 2 ** (n - 1)):
yield ('1{:0%sb}1' % (n - 2)).format(bin_number)
jamcoin_candidate_generator = generate_jamcoin_candidate()
def get_jamcoin_candidate(i):
if i >= len(results_candidates):
jamcoin_candidate = next(jamcoin_candidate_generator)
results_candidates.append((
jamcoin_candidate,
{'nums': [int(jamcoin_candidate, b) for b in range(2, 11)],
'step': 2,
'results': [None] * 9}))
return results_candidates[i]
jamcoin_candidate_i = 0
max_divider = 4
max_jamcoin_i = 2
max_bin_number = 2 ** (n - 1)
while True:
jamcoin_candidate, stats = get_jamcoin_candidate(jamcoin_candidate_i)
all_done = True
for i, num in enumerate(stats['nums']):
if stats['results'][i]:
continue
divider = get_divider(num, stats['step'], max_divider)
if divider:
stats['results'][i] = divider
else:
all_done = False
if all_done:
results.append(jamcoin_candidate + ' ' + ' '.join(map(str, stats['results'])))
results_candidates.pop(jamcoin_candidate_i)
if len(results) == j:
return '\n'.join(results)
else:
jamcoin_candidate_i += 1
if jamcoin_candidate_i >= max_jamcoin_i:
max_divider += 2
jamcoin_candidate_i = 0
max_jamcoin_i = min(max_bin_number, max_jamcoin_i * 2)
if __name__ == '__main__':
cases_number = int(input())
for case_number in range(1, cases_number + 1):
input_args = input()
print('Case #%s:\n%s' % (case_number, solve(input_args)))
|
[
"[[email protected]]"
] | |
3d9809d331b11e78f16c48a0364c18ecd3672cc4
|
6cf57efb6ae16d593d6272816ad5fcb4b869c1e7
|
/bin/django-admin
|
59362b958b1dcd2bc7134477d15610d1f9927277
|
[] |
no_license
|
daltondiaz/true-promotion
|
b41171790c4e9ade7fd503117520dd2f52120c1a
|
4ff1651223ca5a98169e278af238a5d99691c6e3
|
refs/heads/master
| 2021-01-17T17:34:06.353708 | 2016-10-11T02:03:18 | 2016-10-11T02:03:18 | 70,442,353 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 303 |
#!/home/dalton/Dev/python/true_promotion/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"[email protected]"
] | ||
62283757532919c536c4fe9afbc9a9587785faa6
|
fd6862057a794174db628bb94a3e5397f0aeaeab
|
/django/django_full_stack/examproj/examapp/migrations/0003_auto_20200919_1933.py
|
da7dc642242d0910337bf4d796208658c4099c3a
|
[] |
no_license
|
bellos711/python_practice
|
8f7117dd7f21cb1f1549e50de9e4873a2fd0e1d9
|
19539a08830f67bd4ff445dd539e1441b2ecae72
|
refs/heads/master
| 2023-02-15T04:01:40.423472 | 2021-01-11T17:47:36 | 2021-01-11T17:47:36 | 328,255,702 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 533 |
py
|
# Generated by Django 2.2.4 on 2020-09-20 02:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('examapp', '0002_auto_20200918_0954'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='liked_wish',
),
migrations.AddField(
model_name='wish',
name='users_who_liked',
field=models.ManyToManyField(related_name='liked_wish', to='examapp.User'),
),
]
|
[
"[email protected]"
] | |
0f062cc01fbb6a98530d53acaacf401676251db0
|
921fb4c92c8c17f362f74bda23cfa8570495aba2
|
/Fight.py
|
fe99825860bd9c09fe22e4145d0a792e13238050
|
[] |
no_license
|
jamestyhurst/Europa-Barbarorum
|
b3e652559110e1e4f4c97a342b44592bafa52020
|
c34015028f69a553429d43b9852620d8e8dacd87
|
refs/heads/master
| 2021-01-01T02:42:10.946861 | 2020-02-24T07:06:42 | 2020-02-24T07:06:42 | 239,145,713 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 36 |
py
|
#Fight.py, File for combat functions
|
[
"[email protected]"
] | |
1bcd21e97a0088563cadcf935ce0e1dc6bc280f8
|
7f9954b117c7cd3e514c0643c0689245a7927e0c
|
/src/Speech_recognition.py
|
fd0c5339ea2f3ffc767352db7bfc5c6112ec6f4b
|
[
"MIT"
] |
permissive
|
pranayjoshi/Speech_recognition
|
30f0a9512724230a12000ebc0626b4f6d69b86a4
|
e42e486babffc7941ff2e425fd48c47e206ce539
|
refs/heads/master
| 2022-12-24T14:13:00.326382 | 2020-09-04T18:01:07 | 2020-09-04T18:01:07 | 168,701,681 | 2 | 4 |
MIT
| 2020-10-01T20:17:11 | 2019-02-01T13:29:57 |
Python
|
UTF-8
|
Python
| false | false | 10,035 |
py
|
"""
Project name = Pranay Assistant(Indo)
Name = Indo
Developer Name = Pranay Joshi
Version = 2.0
Old modules = Speech recognition, GTTs, PyAudio, os, re, webbrowser, smtplib, certifi, requests, pyttsx3 etc.
New Modules = google, word2number, wikipedia, time, json, datetime, ctime
"""
import speech_recognition as sr
import os
import re
import webbrowser
import smtplib
import requests
import pyttsx3
import time
from time import ctime
from word2number import w2n as converse
import wikipedia
import json
from datetime import date
# Defining global variables
engine = pyttsx3.init() # defining pyttsx3
indo = ["indo", "endo"] # deining the name by which the assistant will be called
# Intial defines
def speak(text): # This speak command will speak the text
engine.say(text)
engine.runAndWait()
speak("Hi Pranay") # Checking by speaking the developers name
def today(): # defining this to get the date
today = date.today()
return today
def present(l, command): # function used to check whether the command addresses the assistant
ls = []
for i in indo:
for j in l:
get = str(i)+ " " + str(j)
if get in command:
return True
break
# Important function for recognizing voice
def myCommand():
"listens for commands"
r = sr.Recognizer()
with sr.Microphone() as source:
speak('i am ready for your command')
r.pause_threshold = 1
r.adjust_for_ambient_noise(source, duration=1)
audio = r.listen(source)
try:
command = r.recognize_google(audio).lower()
speak('you said:' + command +'\n')
#loop back to continue to listen for commands if unrecognizable speech is received
except sr.UnknownValueError:
speak("Your last command couldn\'t be heard")
command = myCommand();
return command
# This is the main assistant
def assistant(command):
    # Defining important variables and loading some files
with open("mailing_list.json", "r+") as file: #for mailing system
data1 = json.load(file)
mailing_list = data1
recipient_name = list(data1.keys())
with open("app_file.json", "r+") as file: # For opening apps
data2 = json.load(file)
location = data2
apps = list(data2.keys())
with open("app_file.json", "r+") as file: # For opening apps
data3 = json.load(file)
link= data3
web = list(link.keys())
# fun questions
if 'hey indo what\'s your actual name' in command:
speak("Pranay\'s Assistant")
elif present(['what\'s up'],command):
speak('Just doing my thing')
# Web based statements
elif present([(f"open {i}") for i in web], command): # websites in websites.json
con = re.search('open (.*)', command)
con = con.group(1)
url = link[con]
webbrowser.open(url)
speak('done')
elif present(['open website'], command): # websites in realtime
con = re.search('open website (.+)', command)
if con:
domain = con.group(1)
url = 'https://www.' + domain
webbrowser.open(url)
speak('done')
# web based commands/scrapping
# jokes
elif present(['tell some jokes', 'tell some joke', "tell me some jokes", "tell me some joke"], command):
res = requests.get(
'https://icanhazdadjoke.com/',
headers={"Accept":"application/json"}
)
if res.status_code == requests.codes.ok:
speak(str(res.json()['joke']))
else:
speak('oops!I ran out of jokes')
# Wikipedia Search
elif present(["wikipedia search", "search in wikipedia"], command):
con = re.search('for (.*)', command)
con = con.group(1)
speak(f"What do you want to hear about {con} , It's Definition, A short summary, A summary, or view full page content")
response = myCommand();
if "definition" in response:
speak(f"here is the defination of {con}, " + wikipedia.summary(con, sentences=2))
elif "short summary" in command:
speak(f"here is a short summary of {con}," + wikipedia.summary(con, sentences=4))
elif " summary" in command:
speak(f"here is a quick summary of {con}" + wikipedia.summary(con))
elif "page content" in command:
print(f"here is the full page content of {con}" + wikipedia.page(con).content)
else:
print("invalid command!")
    # Weather
elif present(['what\'s current weather in'],command):
con = re.search('current weather in (.*)', command)
if con:
city = con.group(1)
url2 = 'https://api.openweathermap.org/data/2.5/weather?appid=608e56270a3d78b4012bbfdda0f05234&q=' + city
res = requests.get(url2)
database = res.json()
temp = database['main']['temp']
wind = database['wind']['speed']
overall = database['weather'][0]['main']
            speak(f'The current weather in {city} is {overall}. The temperature is {temp:.1f} degrees. Its wind speed is {wind}.')
# Longitude & Latitude
elif present(['find longitude and latitude of'],command):
con = re.search('find longitude and latitude of(.*)', command)
if con:
city = con.group(1)
url2 = 'https://api.openweathermap.org/data/2.5/weather?appid=608e56270a3d78b4012bbfdda0f05234&q=' + city
res = requests.get(url2)
database = res.json()
lat = database['coord']['lat']
long = database['coord']['lon']
speak(f'it\'s latitude is {lat}. it\'s longitude is {long}.')
# opens apps
elif present([(f"open {i}") for i in apps],command):
con = re.search('open (.*)', command)
con = con.group(1)
val = location[con]
os.startfile(val)
speak('done')
# Sending email
elif present(['open email', "send mail"], command):
speak("'Who is the recipient?'")
recipient = myCommand()
if recipient in recipient_name:
speak('What should I say?')
content = myCommand()
# init gmail SMTP
            mail = smtplib.SMTP('smtp.gmail.com', 587)
# identify to server
mail.ehlo()
# encrypt session
mail.starttls()
# login
mail.login('[email protected]', 'pass123')
# send message
mail.sendmail(recipient, mailing_list[recipient], content)
# end mail connection
mail.close()
speak('Email sent.')
# OS based commands
# Computer shutdown
elif 'indo shutdown' in command:
speak('understood sir')
speak('connecting to command prompt')
speak('shutting down your computer')
os.system('shutdown -s')
    # stop compiling
elif 'indo quit' in command:
speak('ok sir')
speak('closing all systems')
speak('disconnecting to servers')
speak('going offline')
quit()
#present time
elif "indo what's the time" in command:
time = ctime().split(" ")[3].split(":")[0:2]
if time[0] == "00":
hours = '12'
else:
hours = time[0]
minutes = time[1]
time = hours + " hours and " + minutes + "minutes"
speak(time)
# present date
elif present(["what's the date", "what is the date today", "what is the date", "today's date","what is today's date"],command):
d2 = today().strftime("%B %d, %Y")
speak(f"today's date is{d2}")
# pausing the script
elif present(["pause for", "wait for"], command):
con = re.search('for (.*)', command)
con = str(con.group(1))
l = con.split()
con = l[0]
con = int(con)
con_st = l[1]
print(con)
con = int(con)
check = "seconds"
minute = ["minutes", "mins", "minute"]
if con_st in minute:
con *= 60
check = "minutes"
speak(f"Okay! I am taking rest for {con} {check}")
time.sleep(con)
# google based search commands
# Google search results
elif present(['show the results for', "google search", "google", "results of"],command):
con = re.search('results for (.*)', command)
con = con.group(1)
try:
from googlesearch import search
except ImportError:
print("No module named 'google' found")
l = []
query = command
i = 1
for j in search(query, tld="co.in", num=10, stop=10, pause=2):
print(str(i) + "\t" + j)
l.append(j)
i += 1
speak("Which website do you want to see. Speak the number")
res = myCommand();
print("okay")
final = converse.word_to_num(res)
webbrowser.open_new_tab(l[final])
# Search for results in youtube
elif present(["open youtube", "open youtube and search for", "youtube search", "youtube"],command):
con = command.split("for")[-1]
url = "https://www.youtube.com/results?search_query=" + con
webbrowser.get().open(url)
speak("Here is what I found for " + con + "on youtube")
# rest search in google api = btnG=1&q=
else:
webbrowser.open_new_tab('http://www.google.com/search?btnG=1&q=' + command)
#loop to continue executing multiple commands
while True:
assistant(myCommand())
|
[
"[email protected]"
] | |
3b8d00461c70b529e72e2d5764a08329dd7404a5
|
46aedfe0d90c396ce17227aa9a53343536c5969c
|
/application/utils/geocode.py
|
8a2d5608d0bd2f4c53c2e1ed3662393d33d4140d
|
[] |
no_license
|
VID-STUDY/CoffeeBot
|
8e81f36a7427414a7ce93720a3c404f30f36226f
|
512b5ba0b53d42f09414f95567d8de9ef12885bf
|
refs/heads/master
| 2022-12-27T16:37:36.372750 | 2020-10-13T18:41:30 | 2020-10-13T18:41:30 | 299,697,140 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,565 |
py
|
from math import radians, cos, sin, asin, sqrt
from yandex_geocoder import Client
from typing import Optional, AnyStr
def distance_between_two_points(first_coordinates: tuple, second_coordinates: tuple) -> tuple:
"""
    Calculate the great circle distance between two points
    on the Earth (specified in decimal degrees).
    :param first_coordinates: Coordinates (latitude, longitude) of the first point
    :param second_coordinates: Coordinates (latitude, longitude) of the second point
    :return: a (value, unit) tuple, where unit is 'km' or 'm'
"""
lat1, lon1 = first_coordinates
lat2, lon2 = second_coordinates
# Convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    # Haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    c = 2 * asin(sqrt(a))
    # Mean radius of the Earth in kilometres is 6371
    km = 6371 * c
    # If the distance is at least one kilometre, return a rounded km value
if km >= 1:
return round(km, 1), 'km'
else:
# If distance is smaller than 1, return metres value
metres = km * 1000
return round(metres), 'm'
def get_address_by_coordinates(coordinates: tuple) -> Optional[AnyStr]:
"""
Return address string value by coordinates
:param coordinates: Coordinates (latitude, longitude)
:return: string value
"""
client = Client('4d16304f-12ba-4134-ac9b-f0da5028a1f4')
latitude = coordinates[0]
longitude = coordinates[1]
location = client.address(longitude, latitude)
return location
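# A minimal usage sketch of the haversine helper (coordinates below are
# illustrative values only, not part of the original module):
if __name__ == "__main__":
    moscow = (55.7558, 37.6173)
    saint_petersburg = (59.9311, 30.3609)
    value, unit = distance_between_two_points(moscow, saint_petersburg)
    print(f"Great-circle distance: {value} {unit}")  # roughly 630 km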
|
[
"[email protected]"
] | |
4e58a5df4cfa106e927ac654f9765e8d43ca5acf
|
4ffe483f8297aa3fee8254ce569c3d260f156524
|
/code/test-suite/EstheRustConvertor/expected/ope_3.py
|
944cfb4dd8188ed2a425d563179d2e2545527d5f
|
[
"MIT"
] |
permissive
|
SamiBelaidi/LOG3210
|
5dd7fbfc4cd709c9c46a2b4dc250cb565650293c
|
03b3952af125be98fe32eefb2338767020033f51
|
refs/heads/master
| 2023-04-16T05:58:20.147795 | 2021-04-25T20:54:29 | 2021-04-25T20:54:29 | 334,222,417 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 69 |
py
|
variable4 = 9 - 9 + 5
variable5 = (9 + 5) - 19023
variable6 = -9 + 0
|
[
"[email protected]"
] | |
7e9261d35a8303545bab4d58c725e6db41d12152
|
84e4232a1162597cdf779eab2a329290bbcc1712
|
/Machine_Learning/D19-D20 Python Clustering Segmentation/solution/m3q1.py
|
62184d5773cf3f2467c6d60a9757ba3977acea58
|
[] |
no_license
|
ohjho/ftds_oct_2018
|
66046cff606577cf60f3caa414e65a8ecc1bffae
|
0d0bd1dad87b583bd9e4c4e51b915c1f68e400fd
|
refs/heads/master
| 2020-04-01T09:18:43.712700 | 2018-12-13T09:29:33 | 2018-12-13T09:29:33 | 153,069,742 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 223 |
py
|
# Initialize instance of StandardScaler
# (assumes scikit-learn is available and that item_data is the item-level
#  pandas DataFrame built earlier in the exercise)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit and transform item_data
item_data_scaled = scaler.fit_transform(item_data)
# Display first 5 rows of item_data_scaled
item_data_scaled[:5]
|
[
"[email protected]"
] | |
919db12dde6d6740bb4331a91b54bcc197fc0e1a
|
0a4b219ff87e296f7afe92967d7224e5d4bef67b
|
/Algorithms/64_Minimum_Path_Sum/Minimum_Path_Sum.py
|
08cf40c542afa3e9c0273d358d568dde19a6abaa
|
[] |
no_license
|
lirui-ML/my_leetcode
|
ef3a3fb73a85d53b14e8c112ad70c1b353c3dfba
|
13e7ec9fe7a92ab13b247bd4edeb1ada5de81a08
|
refs/heads/master
| 2021-08-07T12:26:12.508487 | 2020-12-11T06:07:29 | 2020-12-11T06:07:29 | 229,288,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,129 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
描述:最小路径和 (难度:中等)
给定一个包含非负整数的 m x n 网格,请找出一条从左上角到右下角的路径,使得路径上的数字总和为最小。
说明:每次只能向下或者向右移动一步。
示例:
输入:
[
[1,3,1],
[1,5,1],
[4,2,1]
]
输出: 7
解释: 因为路径 1→3→1→1→1 的总和最小。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/minimum-path-sum
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
def minPathSum(self, grid) -> int:
"""动态规划,时间和空间复杂度为O(m*n),二维数组"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
# initial dp[][]
dp = [[0] * n for _ in range(m)]
dp[0][0] = grid[0][0]
for i in range(1,m):
dp[i][0] = dp[i - 1][0] + grid[i][0]
for j in range(1,n):
dp[0][j] = dp[0][j - 1] + grid[0][j]
for i in range(1,m):
for j in range(1,n):
dp[i][j] = min(dp[i - 1][j], dp[i][j - 1]) + grid[i][j]
return dp[m - 1][n - 1]
def minPathSum2(self, grid):
"""优化动态规划,时间复杂度为O(m*n), 空间复杂度为0(n), 一维数组"""
if len(grid) == 0:
return 0
m = len(grid)
n = len(grid[0])
dp = [0 for _ in range(n)]
dp[0] = grid[0][0]
for i in range(1, n):
dp[i] = dp[i - 1] + grid[0][i]
for i in range(1, m):
dp[0] += grid[i][0]
for j in range(1, n):
dp[j] = min(dp[j - 1], dp[j]) + grid[i][j]
return dp[-1]
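# Note on minPathSum2: before dp[j] is updated it still holds the best sum for
# the cell directly above, while dp[j - 1] already holds the best sum for the
# cell to the left in the current row, so min(dp[j - 1], dp[j]) + grid[i][j]
# rolls the whole DP table into a single O(n) array.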
if __name__=="__main__":
ss = Solution()
test = [
[1, 3, 1, 2],
[1, 5, 1, 5],
[4, 2, 1, 2],
[2, 3, 1, 5]
]
test2 = [
[1, 3, 4, 8],
[3, 2, 2, 4],
[5, 7, 1, 9],
[2, 3, 2, 3]
]
print(ss.minPathSum(test2))
print(ss.minPathSum2(test2))
|
[
"[email protected]"
] | |
d1597ffd8c87152ec49b9949a7de3ec827c5d1d4
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/matplotlib/2017/12/setupext.py
|
2868fd76aee773dc4d8d576d9dfe80e8c6cca6b4
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null |
UTF-8
|
Python
| false | false | 68,786 |
py
|
from __future__ import print_function, absolute_import
from importlib import import_module
from distutils import sysconfig
from distutils import version
from distutils.core import Extension
import distutils.command.build_ext
import glob
import multiprocessing
import os
import platform
import re
import subprocess
from subprocess import check_output
import sys
import warnings
from textwrap import fill
import shutil
import versioneer
PY3min = (sys.version_info[0] >= 3)
def _get_home():
"""Find user's home directory if possible.
Otherwise, returns None.
:see:
http://mail.python.org/pipermail/python-list/2005-February/325395.html
"""
try:
if not PY3min and sys.platform == 'win32':
path = os.path.expanduser(b"~").decode(sys.getfilesystemencoding())
else:
path = os.path.expanduser("~")
except ImportError:
# This happens on Google App Engine (pwd module is not present).
pass
else:
if os.path.isdir(path):
return path
for evar in ('HOME', 'USERPROFILE', 'TMP'):
path = os.environ.get(evar)
if path is not None and os.path.isdir(path):
return path
return None
def _get_xdg_cache_dir():
"""
Returns the XDG cache directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
path = os.environ.get('XDG_CACHE_HOME')
if path is None:
path = _get_home()
if path is not None:
path = os.path.join(path, '.cache', 'matplotlib')
return path
# SHA256 hashes of the FreeType tarballs
_freetype_hashes = {
'2.6.1': '0a3c7dfbda6da1e8fce29232e8e96d987ababbbf71ebc8c75659e4132c367014',
'2.6.2': '8da42fc4904e600be4b692555ae1dcbf532897da9c5b9fb5ebd3758c77e5c2d4',
'2.6.3': '7942096c40ee6fea882bd4207667ad3f24bff568b96b10fd3885e11a7baad9a3',
'2.6.4': '27f0e38347a1850ad57f84fc4dfed68ba0bc30c96a6fa6138ef84d485dd9a8d7',
'2.6.5': '3bb24add9b9ec53636a63ea8e867ed978c4f8fdd8f1fa5ccfd41171163d4249a',
'2.7': '7b657d5f872b0ab56461f3bd310bd1c5ec64619bd15f0d8e08282d494d9cfea4',
'2.7.1': '162ef25aa64480b1189cdb261228e6c5c44f212aac4b4621e28cf2157efb59f5',
'2.8': '33a28fabac471891d0523033e99c0005b95e5618dc8ffa7fa47f9dadcacb1c9b',
'2.8.1': '876711d064a6a1bd74beb18dd37f219af26100f72daaebd2d86cb493d7cd7ec6',
}
# This is the version of FreeType to use when building a local
# version. It must match the value in
# lib/matplotlib.__init__.py and also needs to be changed below in the
# embedded windows build script (grep for "REMINDER" in this file)
LOCAL_FREETYPE_VERSION = '2.6.1'
LOCAL_FREETYPE_HASH = _freetype_hashes.get(LOCAL_FREETYPE_VERSION, 'unknown')
if sys.platform != 'win32':
if not PY3min:
from commands import getstatusoutput
else:
from subprocess import getstatusoutput
if PY3min:
import configparser
else:
import ConfigParser as configparser
# matplotlib build options, which can be altered using setup.cfg
options = {
'display_status': True,
'verbose': False,
'backend': None,
'basedirlist': None
}
setup_cfg = os.environ.get('MPLSETUPCFG', 'setup.cfg')
if os.path.exists(setup_cfg):
if PY3min:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
config.read(setup_cfg)
if config.has_option('status', 'suppress'):
options['display_status'] = not config.getboolean("status", "suppress")
if config.has_option('rc_options', 'backend'):
options['backend'] = config.get("rc_options", "backend")
if config.has_option('directories', 'basedirlist'):
options['basedirlist'] = [
x.strip() for x in
config.get("directories", "basedirlist").split(',')]
if config.has_option('test', 'local_freetype'):
options['local_freetype'] = config.getboolean("test", "local_freetype")
else:
config = None
lft = bool(os.environ.get('MPLLOCALFREETYPE', False))
options['local_freetype'] = lft or options.get('local_freetype', False)
def get_win32_compiler():
"""
Determine the compiler being used on win32.
"""
# Used to determine mingw32 or msvc
# This is pretty bad logic, someone know a better way?
for v in sys.argv:
if 'mingw32' in v:
return 'mingw32'
return 'msvc'
win32_compiler = get_win32_compiler()
def extract_versions():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
with open('lib/matplotlib/__init__.py') as fd:
for line in fd.readlines():
if (line.startswith('__version__numpy__')):
exec(line.strip())
return locals()
def has_include_file(include_dirs, filename):
"""
Returns `True` if `filename` can be found in one of the
directories in `include_dirs`.
"""
if sys.platform == 'win32':
include_dirs = list(include_dirs) # copy before modify
include_dirs += os.environ.get('INCLUDE', '.').split(os.pathsep)
for dir in include_dirs:
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def check_include_file(include_dirs, filename, package):
"""
Raises an exception if the given include file can not be found.
"""
if not has_include_file(include_dirs, filename):
raise CheckFailed(
"The C/C++ header for %s (%s) could not be found. You "
"may need to install the development package." %
(package, filename))
def get_base_dirs():
"""
Returns a list of standard base directories on this platform.
"""
if options['basedirlist']:
return options['basedirlist']
if os.environ.get('MPLBASEDIRLIST'):
return os.environ.get('MPLBASEDIRLIST').split(os.pathsep)
win_bases = ['win32_static', ]
# on conda windows, we also add the <conda_env_dir>\Library,
# as conda installs libs/includes there
# env var names mess: https://github.com/conda/conda/issues/2312
conda_env_path = os.getenv('CONDA_PREFIX') # conda >= 4.1
if not conda_env_path:
conda_env_path = os.getenv('CONDA_DEFAULT_ENV') # conda < 4.1
if conda_env_path and os.path.isdir(conda_env_path):
win_bases.append(os.path.join(conda_env_path, "Library"))
basedir_map = {
'win32': win_bases,
'darwin': ['/usr/local/', '/usr', '/usr/X11',
'/opt/X11', '/opt/local'],
'sunos5': [os.getenv('MPLIB_BASE') or '/usr/local', ],
'gnu0': ['/usr'],
'aix5': ['/usr/local'],
}
return basedir_map.get(sys.platform, ['/usr/local', '/usr'])
def get_include_dirs():
"""
Returns a list of standard include directories on this platform.
"""
include_dirs = [os.path.join(d, 'include') for d in get_base_dirs()]
if sys.platform != 'win32':
# gcc includes this dir automatically, so also look for headers in
# these dirs
include_dirs.extend(
os.environ.get('CPLUS_INCLUDE_PATH', '').split(os.pathsep))
return include_dirs
def is_min_version(found, minversion):
"""
Returns `True` if `found` is at least as high a version as
`minversion`.
"""
expected_version = version.LooseVersion(minversion)
found_version = version.LooseVersion(found)
return found_version >= expected_version
# Define the display functions only if display_status is True.
if options['display_status']:
def print_line(char='='):
print(char * 76)
def print_status(package, status):
initial_indent = "%22s: " % package
indent = ' ' * 24
print(fill(str(status), width=76,
initial_indent=initial_indent,
subsequent_indent=indent))
def print_message(message):
indent = ' ' * 24 + "* "
print(fill(str(message), width=76,
initial_indent=indent,
subsequent_indent=indent))
def print_raw(section):
print(section)
else:
def print_line(*args, **kwargs):
pass
print_status = print_message = print_raw = print_line
# Remove the -Wstrict-prototypes option, as it's not valid for C++
customize_compiler = distutils.command.build_ext.customize_compiler
def my_customize_compiler(compiler):
retval = customize_compiler(compiler)
try:
compiler.compiler_so.remove('-Wstrict-prototypes')
except (ValueError, AttributeError):
pass
return retval
distutils.command.build_ext.customize_compiler = my_customize_compiler
def make_extension(name, files, *args, **kwargs):
"""
Make a new extension. Automatically sets include_dirs and
library_dirs to the base directories appropriate for this
platform.
`name` is the name of the extension.
`files` is a list of source files.
Any additional arguments are passed to the
`distutils.core.Extension` constructor.
"""
ext = DelayedExtension(name, files, *args, **kwargs)
for dir in get_base_dirs():
include_dir = os.path.join(dir, 'include')
if os.path.exists(include_dir):
ext.include_dirs.append(include_dir)
for lib in ('lib', 'lib64'):
lib_dir = os.path.join(dir, lib)
if os.path.exists(lib_dir):
ext.library_dirs.append(lib_dir)
ext.include_dirs.append('.')
return ext
def get_file_hash(filename):
"""
Get the SHA256 hash of a given filename.
"""
import hashlib
BLOCKSIZE = 1 << 16
hasher = hashlib.sha256()
with open(filename, 'rb') as fd:
buf = fd.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = fd.read(BLOCKSIZE)
return hasher.hexdigest()
class PkgConfig(object):
"""
This is a class for communicating with pkg-config.
"""
def __init__(self):
"""
Determines whether pkg-config exists on this machine.
"""
if sys.platform == 'win32':
self.has_pkgconfig = False
else:
try:
self.pkg_config = os.environ['PKG_CONFIG']
except KeyError:
self.pkg_config = 'pkg-config'
self.set_pkgconfig_path()
status, output = getstatusoutput(self.pkg_config + " --help")
self.has_pkgconfig = (status == 0)
if not self.has_pkgconfig:
print("IMPORTANT WARNING:")
print(
" pkg-config is not installed.\n"
" matplotlib may not be able to find some of its dependencies")
def set_pkgconfig_path(self):
pkgconfig_path = sysconfig.get_config_var('LIBDIR')
if pkgconfig_path is None:
return
pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
if not os.path.isdir(pkgconfig_path):
return
try:
os.environ['PKG_CONFIG_PATH'] += ':' + pkgconfig_path
except KeyError:
os.environ['PKG_CONFIG_PATH'] = pkgconfig_path
def setup_extension(self, ext, package, default_include_dirs=[],
default_library_dirs=[], default_libraries=[],
alt_exec=None):
"""
Add parameters to the given `ext` for the given `package`.
"""
flag_map = {
'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
executable = alt_exec
if self.has_pkgconfig:
executable = (self.pkg_config + ' {0}').format(package)
use_defaults = True
if executable is not None:
command = "{0} --libs --cflags ".format(executable)
try:
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
pass
else:
output = output.decode(sys.getfilesystemencoding())
use_defaults = False
for token in output.split():
attr = flag_map.get(token[:2])
if attr is not None:
getattr(ext, attr).insert(0, token[2:])
if use_defaults:
basedirs = get_base_dirs()
for base in basedirs:
for include in default_include_dirs:
dir = os.path.join(base, include)
if os.path.exists(dir):
ext.include_dirs.append(dir)
for lib in default_library_dirs:
dir = os.path.join(base, lib)
if os.path.exists(dir):
ext.library_dirs.append(dir)
ext.libraries.extend(default_libraries)
return True
return False
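    # For illustration: on a typical Linux system `pkg-config --cflags --libs
    # freetype2` prints something like "-I/usr/include/freetype2 -lfreetype",
    # and the token loop above routes "-I"/"-L"/"-l" flags into include_dirs,
    # library_dirs and libraries respectively (exact output varies by system).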
def get_version(self, package):
"""
Get the version of the package from pkg-config.
"""
if not self.has_pkgconfig:
return None
status, output = getstatusoutput(
self.pkg_config + " %s --modversion" % (package))
if status == 0:
return output
return None
# The PkgConfig class should be used through this singleton
pkg_config = PkgConfig()
class CheckFailed(Exception):
"""
Exception thrown when a `SetupPackage.check` method fails.
"""
pass
class SetupPackage(object):
optional = False
pkg_names = {
"apt-get": None,
"yum": None,
"dnf": None,
"brew": None,
"port": None,
"windows_url": None
}
def check(self):
"""
Checks whether the build dependencies are met. Should raise a
`CheckFailed` exception if the dependency could not be met, otherwise
return a string indicating a version number or some other message
indicating what was found.
"""
pass
def runtime_check(self):
"""
True if the runtime dependencies of the backend are met. Assumes that
the build-time dependencies are met.
"""
return True
def get_packages(self):
"""
Get a list of package names to add to the configuration.
These are added to the `packages` list passed to
`distutils.setup`.
"""
return []
def get_namespace_packages(self):
"""
Get a list of namespace package names to add to the configuration.
These are added to the `namespace_packages` list passed to
`distutils.setup`.
"""
return []
def get_py_modules(self):
"""
Get a list of top-level modules to add to the configuration.
These are added to the `py_modules` list passed to
`distutils.setup`.
"""
return []
def get_package_data(self):
"""
Get a package data dictionary to add to the configuration.
These are merged into to the `package_data` list passed to
`distutils.setup`.
"""
return {}
def get_extension(self):
"""
Get a list of C extensions (`distutils.core.Extension`
objects) to add to the configuration. These are added to the
`extensions` list passed to `distutils.setup`.
"""
return None
def get_install_requires(self):
"""
Get a list of Python packages that we require.
pip/easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def get_setup_requires(self):
"""
Get a list of Python packages that we require at build time.
pip/easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def _check_for_pkg_config(self, package, include_file, min_version=None,
version=None):
"""
A convenience function for writing checks for a
pkg_config-defined dependency.
`package` is the pkg_config package name.
`include_file` is a top-level include file we expect to find.
`min_version` is the minimum version required.
`version` will override the found version if this package
requires an alternate method for that. Set version='unknown'
        if the version is not known but you still want to disable the
        pkg_config version check.
"""
if version is None:
version = pkg_config.get_version(package)
if version is None:
raise CheckFailed(
"pkg-config information for '%s' could not be found." %
package)
if min_version == 'PATCH':
raise CheckFailed(
"Requires patches that have not been merged upstream.")
if min_version and version != 'unknown':
if (not is_min_version(version, min_version)):
raise CheckFailed(
"Requires %s %s or later. Found %s." %
(package, min_version, version))
ext = self.get_extension()
if ext is None:
ext = make_extension('test', [])
pkg_config.setup_extension(ext, package)
check_include_file(
ext.include_dirs + get_include_dirs(), include_file, package)
return 'version %s' % version
def do_custom_build(self):
"""
If a package needs to do extra custom things, such as building a
third-party library, before building an extension, it should
override this method.
"""
pass
def install_help_msg(self):
"""
Do not override this method !
Generate the help message to show if the package is not installed.
To use this in subclasses, simply add the dictionary `pkg_names` as
a class variable:
pkg_names = {
"apt-get": <Name of the apt-get package>,
"yum": <Name of the yum package>,
"dnf": <Name of the dnf package>,
"brew": <Name of the brew package>,
"port": <Name of the port package>,
"windows_url": <The url which has installation instructions>
}
All the dictionary keys are optional. If a key is not present or has
the value `None` no message is provided for that platform.
"""
def _try_managers(*managers):
for manager in managers:
pkg_name = self.pkg_names.get(manager, None)
if pkg_name:
try:
# `shutil.which()` can be used when Python 2.7 support
# is dropped. It is available in Python 3.3+
_ = check_output(["which", manager],
stderr=subprocess.STDOUT)
if manager == 'port':
pkgconfig = 'pkgconfig'
else:
pkgconfig = 'pkg-config'
return ('Try installing {0} with `{1} install {2}` '
'and pkg-config with `{1} install {3}`'
.format(self.name, manager, pkg_name,
pkgconfig))
except subprocess.CalledProcessError:
pass
message = None
if sys.platform == "win32":
url = self.pkg_names.get("windows_url", None)
if url:
message = ('Please check {0} for instructions to install {1}'
.format(url, self.name))
elif sys.platform == "darwin":
message = _try_managers("brew", "port")
elif sys.platform.startswith("linux"):
release = platform.linux_distribution()[0].lower()
if release in ('debian', 'ubuntu'):
message = _try_managers('apt-get')
elif release in ('centos', 'redhat', 'fedora'):
message = _try_managers('dnf', 'yum')
return message
class OptionalPackage(SetupPackage):
optional = True
force = False
config_category = "packages"
default_config = "auto"
@classmethod
def get_config(cls):
"""
Look at `setup.cfg` and return one of ["auto", True, False] indicating
if the package is at default state ("auto"), forced by the user (case
insensitively defined as 1, true, yes, on for True) or opted-out (case
insensitively defined as 0, false, no, off for False).
"""
conf = cls.default_config
if config is not None and config.has_option(cls.config_category, cls.name):
try:
conf = config.getboolean(cls.config_category, cls.name)
except ValueError:
conf = config.get(cls.config_category, cls.name)
return conf
def check(self):
"""
Do not override this method!
For custom dependency checks override self.check_requirements().
Two things are checked: Configuration file and requirements.
"""
# Check configuration file
conf = self.get_config()
# Default "auto" state or install forced by user
if conf in [True, 'auto']:
message = "installing"
# Set non-optional if user sets `True` in config
if conf is True:
self.optional = False
# Configuration opt-out by user
else:
# Some backend extensions (e.g. Agg) need to be built for certain
# other GUI backends (e.g. TkAgg) even when manually disabled
if self.force is True:
message = "installing forced (config override)"
else:
raise CheckFailed("skipping due to configuration")
# Check requirements and add extra information (if any) to message.
# If requirements are not met a CheckFailed should be raised in there.
additional_info = self.check_requirements()
if additional_info:
message += ", " + additional_info
# No CheckFailed raised until now, return install message.
return message
def check_requirements(self):
"""
Override this method to do custom dependency checks.
- Raise CheckFailed() if requirements are not met.
- Return message with additional information, or an empty string
(or None) for no additional information.
"""
return ""
class OptionalBackendPackage(OptionalPackage):
config_category = "gui_support"
class Platform(SetupPackage):
name = "platform"
def check(self):
return sys.platform
class Python(SetupPackage):
name = "python"
def check(self):
major, minor1, minor2, s, tmp = sys.version_info
if major < 2:
raise CheckFailed(
"Requires Python 2.7 or later")
elif major == 2 and minor1 < 7:
raise CheckFailed(
"Requires Python 2.7 or later (in the 2.x series)")
elif major == 3 and minor1 < 4:
raise CheckFailed(
"Requires Python 3.4 or later (in the 3.x series)")
return sys.version
class Matplotlib(SetupPackage):
name = "matplotlib"
def check(self):
return versioneer.get_version()
def get_packages(self):
return [
'matplotlib',
'matplotlib.backends',
'matplotlib.backends.qt_editor',
'matplotlib.compat',
'matplotlib.projections',
'matplotlib.axes',
'matplotlib.sphinxext',
'matplotlib.style',
'matplotlib.testing',
'matplotlib.testing._nose',
'matplotlib.testing._nose.plugins',
'matplotlib.testing.jpl_units',
'matplotlib.tri',
'matplotlib.cbook'
]
def get_py_modules(self):
return ['pylab']
def get_package_data(self):
return {
'matplotlib':
[
'mpl-data/fonts/afm/*.afm',
'mpl-data/fonts/pdfcorefonts/*.afm',
'mpl-data/fonts/pdfcorefonts/*.txt',
'mpl-data/fonts/ttf/*.ttf',
'mpl-data/fonts/ttf/LICENSE_STIX',
'mpl-data/fonts/ttf/COPYRIGHT.TXT',
'mpl-data/fonts/ttf/README.TXT',
'mpl-data/fonts/ttf/RELEASENOTES.TXT',
'mpl-data/images/*.xpm',
'mpl-data/images/*.svg',
'mpl-data/images/*.gif',
'mpl-data/images/*.pdf',
'mpl-data/images/*.png',
'mpl-data/images/*.ppm',
'mpl-data/example/*.npy',
'mpl-data/matplotlibrc',
'backends/web_backend/*.*',
'backends/web_backend/js/*.*',
'backends/web_backend/jquery/js/*.min.js',
'backends/web_backend/jquery/css/themes/base/*.min.css',
'backends/web_backend/jquery/css/themes/base/images/*',
'backends/web_backend/css/*.*',
'backends/Matplotlib.nib/*',
'mpl-data/stylelib/*.mplstyle',
]}
class SampleData(OptionalPackage):
"""
This handles the sample data that ships with matplotlib. It is
technically optional, though most often will be desired.
"""
name = "sample_data"
def get_package_data(self):
return {
'matplotlib':
[
'mpl-data/sample_data/*.*',
'mpl-data/sample_data/axes_grid/*.*',
]}
class Toolkits(OptionalPackage):
name = "toolkits"
def get_packages(self):
return [
'mpl_toolkits',
'mpl_toolkits.mplot3d',
'mpl_toolkits.axes_grid',
'mpl_toolkits.axes_grid1',
'mpl_toolkits.axisartist',
]
def get_namespace_packages(self):
return ['mpl_toolkits']
class Tests(OptionalPackage):
name = "tests"
pytest_min_version = '3.0.0'
default_config = False
def check(self):
super(Tests, self).check()
msgs = []
msg_template = ('{package} is required to run the Matplotlib test '
'suite. Please install it with pip or your preferred '
'tool to run the test suite')
bad_pytest = msg_template.format(
package='pytest %s or later' % self.pytest_min_version
)
try:
import pytest
if is_min_version(pytest.__version__, self.pytest_min_version):
msgs += ['using pytest version %s' % pytest.__version__]
else:
msgs += [bad_pytest]
except ImportError:
msgs += [bad_pytest]
if PY3min:
msgs += ['using unittest.mock']
else:
try:
import mock
msgs += ['using mock %s' % mock.__version__]
except ImportError:
msgs += [msg_template.format(package='mock')]
return ' / '.join(msgs)
def get_packages(self):
return [
'matplotlib.tests',
'matplotlib.sphinxext.tests',
]
def get_package_data(self):
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('lib/matplotlib/tests/baseline_images')]
return {
'matplotlib':
baseline_images +
[
'tests/cmr10.pfb',
'tests/mpltest.ttf',
'tests/test_rcparams.rc',
'tests/test_utf32_be_rcparams.rc',
'sphinxext/tests/tinypages/*.rst',
'sphinxext/tests/tinypages/*.py',
'sphinxext/tests/tinypages/_static/*',
]}
class Toolkits_Tests(Tests):
name = "toolkits_tests"
def check_requirements(self):
conf = self.get_config()
toolkits_conf = Toolkits.get_config()
tests_conf = Tests.get_config()
if conf is True:
Tests.force = True
Toolkits.force = True
elif conf == "auto" and not (toolkits_conf and tests_conf):
# Only auto-install if both toolkits and tests are set
# to be installed
raise CheckFailed("toolkits_tests needs 'toolkits' and 'tests'")
return ""
def get_packages(self):
return [
'mpl_toolkits.tests',
]
def get_package_data(self):
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('lib/mpl_toolkits/tests/baseline_images')]
return {'mpl_toolkits': baseline_images}
def get_namespace_packages(self):
return ['mpl_toolkits']
class DelayedExtension(Extension, object):
"""
A distutils Extension subclass where some of its members
may have delayed computation until reaching the build phase.
This is so we can, for example, get the Numpy include dirs
after pip has installed Numpy for us if it wasn't already
on the system.
"""
def __init__(self, *args, **kwargs):
super(DelayedExtension, self).__init__(*args, **kwargs)
self._finalized = False
self._hooks = {}
def add_hook(self, member, func):
"""
Add a hook to dynamically compute a member.
Parameters
----------
member : string
The name of the member
func : callable
The function to call to get dynamically-computed values
for the member.
"""
self._hooks[member] = func
def finalize(self):
self._finalized = True
class DelayedMember(property):
def __init__(self, name):
self._name = name
def __get__(self, obj, objtype=None):
result = getattr(obj, '_' + self._name, [])
if obj._finalized:
if self._name in obj._hooks:
result = obj._hooks[self._name]() + result
return result
def __set__(self, obj, value):
setattr(obj, '_' + self._name, value)
include_dirs = DelayedMember('include_dirs')
class Numpy(SetupPackage):
name = "numpy"
@staticmethod
def include_dirs_hook():
if PY3min:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
else:
import __builtin__
if hasattr(__builtin__, '__NUMPY_SETUP__'):
del __builtin__.__NUMPY_SETUP__
import numpy
reload(numpy)
ext = Extension('test', [])
ext.include_dirs.append(numpy.get_include())
if not has_include_file(
ext.include_dirs, os.path.join("numpy", "arrayobject.h")):
warnings.warn(
"The C headers for numpy could not be found. "
"You may need to install the development package")
return [numpy.get_include()]
def check(self):
min_version = extract_versions()['__version__numpy__']
try:
import numpy
except ImportError:
return 'not found. pip may install it below.'
if not is_min_version(numpy.__version__, min_version):
raise SystemExit(
"Requires numpy %s or later to build. (Found %s)" %
(min_version, numpy.__version__))
return 'version %s' % numpy.__version__
def add_flags(self, ext):
# Ensure that PY_ARRAY_UNIQUE_SYMBOL is uniquely defined for
# each extension
array_api_name = 'MPL_' + ext.name.replace('.', '_') + '_ARRAY_API'
ext.define_macros.append(('PY_ARRAY_UNIQUE_SYMBOL', array_api_name))
ext.add_hook('include_dirs', self.include_dirs_hook)
ext.define_macros.append(('NPY_NO_DEPRECATED_API',
'NPY_1_7_API_VERSION'))
# Allow NumPy's printf format specifiers in C++.
ext.define_macros.append(('__STDC_FORMAT_MACROS', 1))
def get_setup_requires(self):
return ['numpy>=1.7.1']
def get_install_requires(self):
return ['numpy>=1.7.1']
class LibAgg(SetupPackage):
name = 'libagg'
def check(self):
self.__class__.found_external = True
try:
return self._check_for_pkg_config(
'libagg', 'agg2/agg_basics.h', min_version='PATCH')
except CheckFailed as e:
self.__class__.found_external = False
return str(e) + ' Using local copy.'
def add_flags(self, ext, add_sources=True):
if self.found_external:
pkg_config.setup_extension(ext, 'libagg')
else:
ext.include_dirs.insert(0, 'extern/agg24-svn/include')
if add_sources:
agg_sources = [
'agg_bezier_arc.cpp',
'agg_curves.cpp',
'agg_image_filters.cpp',
'agg_trans_affine.cpp',
'agg_vcgen_contour.cpp',
'agg_vcgen_dash.cpp',
'agg_vcgen_stroke.cpp',
'agg_vpgen_segmentator.cpp'
]
ext.sources.extend(
os.path.join('extern', 'agg24-svn', 'src', x) for x in agg_sources)
class FreeType(SetupPackage):
name = "freetype"
pkg_names = {
"apt-get": "libfreetype6-dev",
"yum": "freetype-devel",
"dnf": "freetype-devel",
"brew": "freetype",
"port": "freetype",
"windows_url": "http://gnuwin32.sourceforge.net/packages/freetype.htm"
}
def check(self):
if options.get('local_freetype'):
return "Using local version for testing"
if sys.platform == 'win32':
try:
check_include_file(get_include_dirs(), 'ft2build.h', 'freetype')
except CheckFailed:
check_include_file(get_include_dirs(), 'freetype2\\ft2build.h', 'freetype')
return 'Using unknown version found on system.'
status, output = getstatusoutput("freetype-config --ftversion")
if status == 0:
version = output
else:
version = None
# Early versions of freetype grep badly inside freetype-config,
# so catch those cases. (tested with 2.5.3).
if version is None or 'No such file or directory\ngrep:' in version:
version = self.version_from_header()
# pkg_config returns the libtool version rather than the
# freetype version so we need to explicitly pass the version
# to _check_for_pkg_config
return self._check_for_pkg_config(
'freetype2', 'ft2build.h',
min_version='2.3', version=version)
def version_from_header(self):
version = 'unknown'
ext = self.get_extension()
if ext is None:
return version
# Return the first version found in the include dirs.
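# freetype.h contains lines like '#define FREETYPE_MAJOR  2', which are parsed below.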
for include_dir in ext.include_dirs:
header_fname = os.path.join(include_dir, 'freetype.h')
if os.path.exists(header_fname):
major, minor, patch = 0, 0, 0
with open(header_fname, 'r') as fh:
for line in fh:
if line.startswith('#define FREETYPE_'):
value = line.rsplit(' ', 1)[1].strip()
if 'MAJOR' in line:
major = value
elif 'MINOR' in line:
minor = value
else:
patch = value
return '.'.join([major, minor, patch])
def add_flags(self, ext):
if options.get('local_freetype'):
src_path = os.path.join(
'build', 'freetype-{0}'.format(LOCAL_FREETYPE_VERSION))
# Statically link to the locally-built freetype.
# This is certainly broken on Windows.
ext.include_dirs.insert(0, os.path.join(src_path, 'include'))
if sys.platform == 'win32':
libfreetype = 'libfreetype.lib'
else:
libfreetype = 'libfreetype.a'
ext.extra_objects.insert(
0, os.path.join(src_path, 'objs', '.libs', libfreetype))
ext.define_macros.append(('FREETYPE_BUILD_TYPE', 'local'))
else:
pkg_config.setup_extension(
ext, 'freetype2',
default_include_dirs=[
'include/freetype2', 'freetype2',
'lib/freetype2/include',
'lib/freetype2/include/freetype2'],
default_library_dirs=[
'freetype2/lib'],
default_libraries=['freetype', 'z'])
ext.define_macros.append(('FREETYPE_BUILD_TYPE', 'system'))
def do_custom_build(self):
# We're using a system freetype
if not options.get('local_freetype'):
return
src_path = os.path.join(
'build', 'freetype-{0}'.format(LOCAL_FREETYPE_VERSION))
# We've already built freetype
if sys.platform == 'win32':
libfreetype = 'libfreetype.lib'
else:
libfreetype = 'libfreetype.a'
if os.path.isfile(os.path.join(src_path, 'objs', '.libs', libfreetype)):
return
tarball = 'freetype-{0}.tar.gz'.format(LOCAL_FREETYPE_VERSION)
tarball_path = os.path.join('build', tarball)
try:
tarball_cache_dir = _get_xdg_cache_dir()
tarball_cache_path = os.path.join(tarball_cache_dir, tarball)
except:
# again, do not really care if this fails
tarball_cache_dir = None
tarball_cache_path = None
if not os.path.isfile(tarball_path):
if (tarball_cache_path is not None and
os.path.isfile(tarball_cache_path)):
if get_file_hash(tarball_cache_path) == LOCAL_FREETYPE_HASH:
try:
os.makedirs('build')
except OSError:
# Don't care if it exists.
pass
try:
shutil.copy(tarball_cache_path, tarball_path)
print('Using cached tarball: {}'
.format(tarball_cache_path))
except OSError:
# If this fails, oh well just re-download
pass
if not os.path.isfile(tarball_path):
if PY3min:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
if not os.path.exists('build'):
os.makedirs('build')
url_fmts = [
'https://downloads.sourceforge.net/project/freetype'
'/freetype2/{version}/{tarball}',
'https://download.savannah.gnu.org/releases/freetype'
'/{tarball}'
]
for url_fmt in url_fmts:
tarball_url = url_fmt.format(
version=LOCAL_FREETYPE_VERSION, tarball=tarball)
print("Downloading {0}".format(tarball_url))
try:
urlretrieve(tarball_url, tarball_path)
except IOError: # URLError (a subclass) on Py3.
print("Failed to download {0}".format(tarball_url))
else:
if get_file_hash(tarball_path) != LOCAL_FREETYPE_HASH:
print("Invalid hash.")
else:
break
else:
raise IOError("Failed to download freetype. "
"You can download the file by "
"alternative means and copy it "
" to '{0}'".format(tarball_path))
try:
os.makedirs(tarball_cache_dir)
except OSError:
# Don't care if it exists.
pass
try:
shutil.copy(tarball_path, tarball_cache_path)
print('Cached tarball at: {}'.format(tarball_cache_path))
except OSError:
# If this fails, we can always re-download.
pass
if get_file_hash(tarball_path) != LOCAL_FREETYPE_HASH:
raise IOError(
"{0} does not match expected hash.".format(tarball))
print("Building {0}".format(tarball))
if sys.platform != 'win32':
# compilation on all other platforms than windows
cflags = 'CFLAGS="{0} -fPIC" '.format(os.environ.get('CFLAGS', ''))
subprocess.check_call(
['tar', 'zxf', tarball], cwd='build')
subprocess.check_call(
[cflags + './configure --with-zlib=no --with-bzip2=no '
'--with-png=no --with-harfbuzz=no'], shell=True, cwd=src_path)
subprocess.check_call(
[cflags + 'make'], shell=True, cwd=src_path)
else:
# compilation on windows
FREETYPE_BUILD_CMD = """\
call "%ProgramFiles%\\Microsoft SDKs\\Windows\\v7.0\\Bin\\SetEnv.Cmd" /Release /{xXX} /xp
call "{vcvarsall}" {xXX}
set MSBUILD=C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\MSBuild.exe
rd /S /Q %FREETYPE%\\objs
%MSBUILD% %FREETYPE%\\builds\\windows\\{vc20xx}\\freetype.sln /t:Clean;Build /p:Configuration="{config}";Platform={WinXX}
echo Build completed, moving result"
:: move to the "normal" path for the unix builds...
mkdir %FREETYPE%\\objs\\.libs
:: REMINDER: fix when changing the version
copy %FREETYPE%\\objs\\{vc20xx}\\{xXX}\\freetype261.lib %FREETYPE%\\objs\\.libs\\libfreetype.lib
if errorlevel 1 (
rem This is a py27 version, which has a different location for the lib file :-/
copy %FREETYPE%\\objs\\win32\\{vc20xx}\\freetype261.lib %FREETYPE%\\objs\\.libs\\libfreetype.lib
)
"""
from setup_external_compile import fixproj, prepare_build_cmd, VS2010, X64, tar_extract
# Note: freetype has no build profile for 2014, so we don't bother...
vc = 'vc2010' if VS2010 else 'vc2008'
WinXX = 'x64' if X64 else 'Win32'
tar_extract(tarball_path, "build")
# This is only false for py2.7, even on py3.5...
if not VS2010:
fixproj(os.path.join(src_path, 'builds', 'windows', vc, 'freetype.sln'), WinXX)
fixproj(os.path.join(src_path, 'builds', 'windows', vc, 'freetype.vcproj'), WinXX)
cmdfile = os.path.join("build", 'build_freetype.cmd')
with open(cmdfile, 'w') as cmd:
cmd.write(prepare_build_cmd(FREETYPE_BUILD_CMD, vc20xx=vc, WinXX=WinXX,
config='Release' if VS2010 else 'LIB Release'))
os.environ['FREETYPE'] = src_path
subprocess.check_call([cmdfile], shell=True)
class FT2Font(SetupPackage):
name = 'ft2font'
def get_extension(self):
sources = [
'src/ft2font.cpp',
'src/ft2font_wrapper.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib.ft2font', sources)
FreeType().add_flags(ext)
Numpy().add_flags(ext)
return ext
class Png(SetupPackage):
name = "png"
pkg_names = {
"apt-get": "libpng12-dev",
"yum": "libpng-devel",
"dnf": "libpng-devel",
"brew": "libpng",
"port": "libpng",
"windows_url": "http://gnuwin32.sourceforge.net/packages/libpng.htm"
}
def check(self):
if sys.platform == 'win32':
check_include_file(get_include_dirs(), 'png.h', 'png')
return 'Using unknown version found on system.'
status, output = getstatusoutput("libpng-config --version")
if status == 0:
version = output
else:
version = None
try:
return self._check_for_pkg_config(
'libpng', 'png.h',
min_version='1.2', version=version)
except CheckFailed as e:
if has_include_file(get_include_dirs(), 'png.h'):
return str(e) + ' Using unknown version found on system.'
raise
def get_extension(self):
sources = [
'src/_png.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib._png', sources)
pkg_config.setup_extension(
ext, 'libpng', default_libraries=['png', 'z'],
alt_exec='libpng-config --ldflags')
Numpy().add_flags(ext)
return ext
class Qhull(SetupPackage):
name = "qhull"
def check(self):
self.__class__.found_external = True
try:
return self._check_for_pkg_config(
'libqhull', 'libqhull/qhull_a.h', min_version='2015.2')
except CheckFailed as e:
self.__class__.found_pkgconfig = False
self.__class__.found_external = False
return str(e) + ' Using local copy.'
def add_flags(self, ext):
if self.found_external:
pkg_config.setup_extension(ext, 'qhull',
default_libraries=['qhull'])
else:
ext.include_dirs.insert(0, 'extern')
ext.sources.extend(sorted(glob.glob('extern/libqhull/*.c')))
class TTConv(SetupPackage):
name = "ttconv"
def get_extension(self):
sources = [
'src/_ttconv.cpp',
'extern/ttconv/pprdrv_tt.cpp',
'extern/ttconv/pprdrv_tt2.cpp',
'extern/ttconv/ttutil.cpp'
]
ext = make_extension('matplotlib.ttconv', sources)
Numpy().add_flags(ext)
ext.include_dirs.insert(0, 'extern')
return ext
class Path(SetupPackage):
name = "path"
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_path_wrapper.cpp'
]
ext = make_extension('matplotlib._path', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
return ext
class Image(SetupPackage):
name = "image"
def get_extension(self):
sources = [
'src/_image.cpp',
'src/mplutils.cpp',
'src/_image_wrapper.cpp',
'src/py_converters.cpp'
]
ext = make_extension('matplotlib._image', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
return ext
class Contour(SetupPackage):
name = "contour"
def get_extension(self):
sources = [
"src/_contour.cpp",
"src/_contour_wrapper.cpp",
]
ext = make_extension('matplotlib._contour', sources)
Numpy().add_flags(ext)
return ext
class QhullWrap(SetupPackage):
name = "qhull_wrap"
def get_extension(self):
sources = ['src/qhull_wrap.c']
ext = make_extension('matplotlib._qhull', sources,
define_macros=[('MPL_DEVNULL', os.devnull)])
Numpy().add_flags(ext)
Qhull().add_flags(ext)
return ext
class Tri(SetupPackage):
name = "tri"
def get_extension(self):
sources = [
"lib/matplotlib/tri/_tri.cpp",
"lib/matplotlib/tri/_tri_wrapper.cpp",
"src/mplutils.cpp"
]
ext = make_extension('matplotlib._tri', sources)
Numpy().add_flags(ext)
return ext
class InstallRequires(SetupPackage):
name = "install_requires"
def check(self):
return "handled by setuptools"
def get_install_requires(self):
install_requires = [
"cycler>=0.10",
"pyparsing>=2.0.1,!=2.0.4,!=2.1.2,!=2.1.6",
"python-dateutil>=2.0",
"pytz",
"six>=1.10",
]
if sys.version_info < (3,):
install_requires += ["backports.functools_lru_cache"]
if sys.version_info < (3,) and os.name == "posix":
install_requires += ["subprocess32"]
return install_requires
class BackendAgg(OptionalBackendPackage):
name = "agg"
force = True
def get_extension(self):
sources = [
"src/mplutils.cpp",
"src/py_converters.cpp",
"src/_backend_agg.cpp",
"src/_backend_agg_wrapper.cpp"
]
ext = make_extension('matplotlib.backends._backend_agg', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
FreeType().add_flags(ext)
return ext
class BackendTkAgg(OptionalBackendPackage):
name = "tkagg"
force = True
def check(self):
return "installing; run-time loading from Python Tcl / Tk"
def runtime_check(self):
""" Checks whether TkAgg runtime dependencies are met
"""
pkg_name = 'tkinter' if PY3min else 'Tkinter'
try:
import_module(pkg_name)
except ImportError:
return False
return True
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_tkagg.cpp'
]
ext = make_extension('matplotlib.backends._tkagg', sources)
self.add_flags(ext)
Numpy().add_flags(ext)
LibAgg().add_flags(ext, add_sources=False)
return ext
def add_flags(self, ext):
ext.include_dirs.insert(0, 'src')
if sys.platform == 'win32':
# PSAPI library needed for finding Tcl / Tk at run time
ext.libraries.extend(['psapi'])
class BackendGtk(OptionalBackendPackage):
name = "gtk"
def check_requirements(self):
try:
import gtk
except ImportError:
raise CheckFailed("Requires pygtk")
except RuntimeError:
raise CheckFailed('pygtk present, but import failed.')
else:
version = (2, 2, 0)
if gtk.pygtk_version < version:
raise CheckFailed(
"Requires pygtk %d.%d.%d or later. "
"Found %d.%d.%d" % (version + gtk.pygtk_version))
ext = self.get_extension()
self.add_flags(ext)
check_include_file(ext.include_dirs,
os.path.join("gtk", "gtk.h"),
'gtk')
check_include_file(ext.include_dirs,
os.path.join("pygtk", "pygtk.h"),
'pygtk')
return 'Gtk: %s pygtk: %s' % (
".".join(str(x) for x in gtk.gtk_version),
".".join(str(x) for x in gtk.pygtk_version))
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def get_extension(self):
sources = [
'src/_backend_gdk.c'
]
ext = make_extension('matplotlib.backends._backend_gdk', sources)
self.add_flags(ext)
Numpy().add_flags(ext)
return ext
def add_flags(self, ext):
if sys.platform == 'win32':
def getoutput(s):
ret = os.popen(s).read().strip()
return ret
if 'PKG_CONFIG_PATH' not in os.environ:
# If Gtk+ is installed, pkg-config is required to be installed
os.environ['PKG_CONFIG_PATH'] = 'C:\\GTK\\lib\\pkgconfig'
# popen broken on my win32 platform so I can't use pkgconfig
ext.library_dirs.extend(
['C:/GTK/bin', 'C:/GTK/lib'])
ext.include_dirs.extend(
['win32_static/include/pygtk-2.0',
'C:/GTK/include',
'C:/GTK/include/gobject',
'C:/GTK/include/gext',
'C:/GTK/include/glib',
'C:/GTK/include/pango',
'C:/GTK/include/atk',
'C:/GTK/include/X11',
'C:/GTK/include/cairo',
'C:/GTK/include/gdk',
'C:/GTK/include/gdk-pixbuf',
'C:/GTK/include/gtk',
])
pygtkIncludes = getoutput(
'pkg-config --cflags-only-I pygtk-2.0').split()
gtkIncludes = getoutput(
'pkg-config --cflags-only-I gtk+-2.0').split()
includes = pygtkIncludes + gtkIncludes
ext.include_dirs.extend([include[2:] for include in includes])
pygtkLinker = getoutput('pkg-config --libs pygtk-2.0').split()
gtkLinker = getoutput('pkg-config --libs gtk+-2.0').split()
linkerFlags = pygtkLinker + gtkLinker
ext.libraries.extend(
[flag[2:] for flag in linkerFlags if flag.startswith('-l')])
ext.library_dirs.extend(
[flag[2:] for flag in linkerFlags if flag.startswith('-L')])
ext.extra_link_args.extend(
[flag for flag in linkerFlags if not
(flag.startswith('-l') or flag.startswith('-L'))])
# visual studio doesn't need the math library
if (sys.platform == 'win32' and
win32_compiler == 'msvc' and
'm' in ext.libraries):
ext.libraries.remove('m')
elif sys.platform != 'win32':
pkg_config.setup_extension(ext, 'pygtk-2.0')
pkg_config.setup_extension(ext, 'gtk+-2.0')
class BackendGtkAgg(BackendGtk):
name = "gtkagg"
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_gtkagg.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib.backends._gtkagg', sources)
self.add_flags(ext)
LibAgg().add_flags(ext)
Numpy().add_flags(ext)
return ext
def backend_gtk3agg_internal_check(x):
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (ImportError, RuntimeError):
return (False, "Requires pygobject to be installed.")
return (True, "version %s.%s.%s" % (
Gtk.get_major_version(),
Gtk.get_minor_version(),
Gtk.get_micro_version()))
class BackendGtk3Agg(OptionalBackendPackage):
name = "gtk3agg"
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
res = p.map_async(backend_gtk3agg_internal_check, [0])
success, msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
# No result returned. Probably hanging, terminate the process.
success = False
raise CheckFailed("Check timed out")
except:
p.close()
# Some other error.
success = False
msg = "Could not determine"
raise
else:
p.close()
finally:
p.join()
if success:
return msg
else:
raise CheckFailed(msg)
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def backend_gtk3cairo_internal_check(x):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
return (False, "Requires cairocffi or pycairo to be installed.")
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (RuntimeError, ImportError):
return (False, "Requires pygobject to be installed.")
return (True, "version %s.%s.%s" % (
Gtk.get_major_version(),
Gtk.get_minor_version(),
Gtk.get_micro_version()))
class BackendGtk3Cairo(OptionalBackendPackage):
name = "gtk3cairo"
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
res = p.map_async(backend_gtk3cairo_internal_check, [0])
success, msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
# No result returned. Probably hanging, terminate the process.
success = False
raise CheckFailed("Check timed out")
except:
p.close()
success = False
raise
else:
p.close()
finally:
p.join()
if success:
return msg
else:
raise CheckFailed(msg)
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
class BackendWxAgg(OptionalBackendPackage):
name = "wxagg"
def check_requirements(self):
wxversioninstalled = True
try:
import wxversion
except ImportError:
wxversioninstalled = False
if wxversioninstalled:
try:
_wx_ensure_failed = wxversion.AlreadyImportedError
except AttributeError:
_wx_ensure_failed = wxversion.VersionError
try:
wxversion.ensureMinimal('2.9')
except _wx_ensure_failed:
pass
try:
import wx
backend_version = wx.VERSION_STRING
except ImportError:
raise CheckFailed("requires wxPython")
if not is_min_version(backend_version, "2.9"):
raise CheckFailed(
"Requires wxPython 2.9, found %s" % backend_version)
return "version %s" % backend_version
class BackendMacOSX(OptionalBackendPackage):
name = 'macosx'
def check_requirements(self):
if sys.platform != 'darwin':
raise CheckFailed("Mac OS-X only")
return 'darwin'
def get_extension(self):
sources = [
'src/_macosx.m'
]
ext = make_extension('matplotlib.backends._macosx', sources)
ext.extra_link_args.extend(['-framework', 'Cocoa'])
return ext
class Windowing(OptionalBackendPackage):
"""
Builds the windowing extension.
"""
name = "windowing"
def check_requirements(self):
if sys.platform != 'win32':
raise CheckFailed("Microsoft Windows only")
config = self.get_config()
if config is False:
raise CheckFailed("skipping due to configuration")
return ""
def get_extension(self):
sources = [
"src/_windowing.cpp"
]
ext = make_extension('matplotlib._windowing', sources)
ext.include_dirs.extend(['C:/include'])
ext.libraries.extend(['user32'])
ext.library_dirs.extend(['C:/lib'])
ext.extra_link_args.append("-mwindows")
return ext
class BackendQtBase(OptionalBackendPackage):
def convert_qt_version(self, version):
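# QtCore.QT_VERSION packs the version as 0xMMNNPP, so e.g. 0x050906 becomes '5.9.6'.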
version = '%x' % version
temp = []
while len(version) > 0:
version, chunk = version[:-2], version[-2:]
temp.insert(0, str(int(chunk, 16)))
return '.'.join(temp)
def check_requirements(self):
'''
If PyQt4/PyQt5 is already imported, importing PyQt5/PyQt4 will fail
so we need to test in a subprocess (as for Gtk3).
'''
try:
p = multiprocessing.Pool()
except:
# Can't do multiprocessing, fall back to normal approach
# (this will fail if importing both PyQt4 and PyQt5).
try:
# Try in-process
msg = self.callback(self)
except RuntimeError:
raise CheckFailed(
"Could not import: are PyQt4 & PyQt5 both installed?")
else:
# Multiprocessing OK
try:
res = p.map_async(self.callback, [self])
msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
# No result returned. Probably hanging, terminate the process.
raise CheckFailed("Check timed out")
except:
# Some other error.
p.close()
raise
else:
# Clean exit
p.close()
finally:
# Tidy up multiprocessing
p.join()
return msg
def backend_pyside_internal_check(self):
try:
from PySide import __version__
from PySide import QtCore
except ImportError:
raise CheckFailed("PySide not found")
else:
return ("Qt: %s, PySide: %s" %
(QtCore.__version__, __version__))
def backend_pyqt4_internal_check(self):
try:
from PyQt4 import QtCore
except ImportError:
raise CheckFailed("PyQt4 not found")
try:
qt_version = QtCore.QT_VERSION
pyqt_version_str = QtCore.PYQT_VERSION_STR
except AttributeError:
raise CheckFailed('PyQt4 not correctly imported')
else:
return ("Qt: %s, PyQt: %s" % (self.convert_qt_version(qt_version), pyqt_version_str))
def backend_qt4_internal_check(self):
successes = []
failures = []
try:
successes.append(backend_pyside_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
try:
successes.append(backend_pyqt4_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
if len(successes) == 0:
raise CheckFailed('; '.join(failures))
return '; '.join(successes + failures)
class BackendQt4(BackendQtBase):
name = "qt4agg"
def __init__(self, *args, **kwargs):
BackendQtBase.__init__(self, *args, **kwargs)
self.callback = backend_qt4_internal_check
def backend_pyside2_internal_check(self):
try:
from PySide2 import __version__
from PySide2 import QtCore
except ImportError:
raise CheckFailed("PySide2 not found")
else:
return ("Qt: %s, PySide2: %s" %
(QtCore.__version__, __version__))
def backend_pyqt5_internal_check(self):
try:
from PyQt5 import QtCore
except ImportError:
raise CheckFailed("PyQt5 not found")
try:
qt_version = QtCore.QT_VERSION
pyqt_version_str = QtCore.PYQT_VERSION_STR
except AttributeError:
raise CheckFailed('PyQt5 not correctly imported')
else:
return ("Qt: %s, PyQt: %s" % (self.convert_qt_version(qt_version), pyqt_version_str))
def backend_qt5_internal_check(self):
successes = []
failures = []
try:
successes.append(backend_pyside2_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
try:
successes.append(backend_pyqt5_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
if len(successes) == 0:
raise CheckFailed('; '.join(failures))
return '; '.join(successes + failures)
class BackendQt5(BackendQtBase):
name = "qt5agg"
def __init__(self, *args, **kwargs):
BackendQtBase.__init__(self, *args, **kwargs)
self.callback = backend_qt5_internal_check
class BackendCairo(OptionalBackendPackage):
name = "cairo"
def check_requirements(self):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
raise CheckFailed("cairocffi or pycairo not found")
else:
return "pycairo version %s" % cairo.version
else:
return "cairocffi version %s" % cairocffi.version
class DviPng(SetupPackage):
name = "dvipng"
optional = True
def check(self):
try:
output = check_output('dvipng -version', shell=True,
stderr=subprocess.STDOUT)
return "version %s" % output.splitlines()[1].decode().split()[-1]
except (IndexError, ValueError, subprocess.CalledProcessError):
raise CheckFailed()
class Ghostscript(SetupPackage):
name = "ghostscript"
optional = True
def check(self):
if sys.platform == 'win32':
# mgs is the name in miktex
gs_execs = ['gswin32c', 'gswin64c', 'mgs', 'gs']
else:
gs_execs = ['gs']
for gs_exec in gs_execs:
try:
command = gs_exec + ' --version'
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
return "version %s" % output.decode()[:-1]
except (IndexError, ValueError, subprocess.CalledProcessError):
pass
raise CheckFailed()
class LaTeX(SetupPackage):
name = "latex"
optional = True
def check(self):
try:
output = check_output('latex -version', shell=True,
stderr=subprocess.STDOUT)
line = output.splitlines()[0].decode()
pattern = r'(3\.1\d+)|(MiKTeX \d+\.\d+)'
match = re.search(pattern, line)
return "version %s" % match.group(0)
except (IndexError, ValueError, AttributeError, subprocess.CalledProcessError):
raise CheckFailed()
class PdfToPs(SetupPackage):
name = "pdftops"
optional = True
def check(self):
try:
output = check_output('pdftops -v', shell=True,
stderr=subprocess.STDOUT)
for line in output.splitlines():
line = line.decode()
if 'version' in line:
return "version %s" % line.split()[2]
except (IndexError, ValueError, subprocess.CalledProcessError):
pass
raise CheckFailed()
class OptionalPackageData(OptionalPackage):
config_category = "package_data"
class Dlls(OptionalPackageData):
"""
On Windows, this packages any DLL files that can be found in the
lib/matplotlib/* directories.
"""
name = "dlls"
def check_requirements(self):
if sys.platform != 'win32':
raise CheckFailed("Microsoft Windows only")
def get_package_data(self):
return {'': ['*.dll']}
@classmethod
def get_config(cls):
"""
Look at `setup.cfg` and return one of ["auto", True, False] indicating
if the package is at default state ("auto"), forced by the user (True)
or opted-out (False).
"""
try:
return config.getboolean(cls.config_category, cls.name)
except:
return False # <-- default
|
[
"[email protected]"
] | |
356d6496f9b5d3ef867d61fe4ea944f06311fa75
|
da9e1f7ef83345c4490e744c4bea01cbafe7c3f0
|
/catalog/test_utils.py
|
dbb85912ceed36c3cc341e4f20bef4d04764ddbc
|
[
"MIT"
] |
permissive
|
chriswilley/catalog
|
38328c1facb973c26ed562719b33e20bdbe1e1b0
|
09a664a1547a55bc20ff0c8108f9bde55ff10ce3
|
refs/heads/master
| 2022-12-02T15:06:11.801009 | 2020-03-24T16:26:24 | 2020-03-24T16:26:24 | 49,510,792 | 1 | 1 |
MIT
| 2022-11-22T04:28:14 | 2016-01-12T15:53:30 |
Python
|
UTF-8
|
Python
| false | false | 1,370 |
py
|
import json
import os
from catalog import app
def delete_test_file(filename):
"""Delete a named file used for testing purposes.
"""
file_path = os.path.join(
os.path.dirname(__file__), filename)
os.remove(file_path)
return
def get_google_client_id():
"""Since we have to do this a number of times while testing the Google
authentication process, save ourselves some typing by putting the
code in a callable function. Facebook API details are in config.py,
so we can just use app.config['FACEBOOK_CONFIG'] for that.
"""
client_id = json.loads(
open('instance/client_secrets.json', 'r').read())['web']['client_id']
return client_id
def save_google_secrets_test_files():
"""Generate JSON files for testing the Google authentication
process. Note that all we're doing is changing the token_uri
parameter.
"""
with open('instance/client_secrets.json', 'r') as f:
cs = json.loads(f.read())
cs['web']['token_uri'] = 'http://localhost:5000/test/get_access_token/'
with open('instance/client_secrets_test.json', 'w') as f2:
f2.write(json.dumps(cs))
url = 'http://localhost:5000/test/get_wrong_access_token/'
cs['web']['token_uri'] = url
with open('instance/client_secrets_bogus_test.json', 'w') as f3:
f3.write(json.dumps(cs))
return
|
[
"[email protected]"
] | |
a7e160e11c6dae2533059ec8221fb03be44a9eb8
|
821c1063078c22edc0b35a98d9634ad4a6d2f10a
|
/contacts/migrations/0001_initial.py
|
3225dc0a409a65be4c26266df6f1087141ff2191
|
[] |
no_license
|
shudii1/carzone-gitproject
|
888b19e8671843af74ffaee39a284754032a85f1
|
1b35e1873dab203eeb35d710ea3aa713280db145
|
refs/heads/master
| 2023-04-28T16:38:29.478805 | 2021-05-17T05:11:42 | 2021-05-17T05:11:42 | 334,718,960 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,222 |
py
|
# Generated by Django 3.0.7 on 2021-02-14 14:40
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('car_id', models.IntegerField()),
('customer_need', models.CharField(max_length=100)),
('car_title', models.CharField(max_length=100)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('email', models.EmailField(max_length=100)),
('phone', models.CharField(max_length=100)),
('message', models.TextField(blank=True)),
('user_id', models.IntegerField(blank=True)),
('create_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
|
[
"[email protected]"
] | |
72dff18867a5ecc45e8a6feb50567cf3be592ed6
|
6c951ca04d6c0db92b05972d651d370302d98a2c
|
/tests/test_sensitivity_analyzer.py
|
35a1db44b33b09a91687ae8644cb8603a1c9727c
|
[
"MIT"
] |
permissive
|
nickderobertis/sensitivity
|
9309bba0aadbac6e8dba09e7c7b1477d063a6d6d
|
8f0d0e676213772bdb8cbc8c6fc08fdba6dc6b53
|
refs/heads/master
| 2023-02-23T20:33:45.118907 | 2022-10-09T01:17:01 | 2022-10-09T01:17:01 | 239,607,375 | 12 | 0 |
MIT
| 2023-02-11T02:07:41 | 2020-02-10T20:33:30 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,995 |
py
|
import uuid
from pandas.testing import assert_frame_equal
from sensitivity import SensitivityAnalyzer
from tests.base import EXPECT_DF_TWO_VALUE, SENSITIVITY_VALUES_TWO_VALUE, add_5_to_values, RESULT_NAME, \
SENSITIVITY_VALUES_THREE_VALUE, add_10_to_values, EXPECT_DF_THREE_VALUE, assert_styled_matches, \
DF_STYLED_NUM_FMT_PATH, assert_graph_matches, PLOT_THREE_PATH, PLOT_OPTIONS_PATH, TWO_VALUE_LABELS, DF_LABELED_PATH
class TestSensitivityAnalyzer:
def create_sa(self, **kwargs) -> SensitivityAnalyzer:
sa_config = dict(
sensitivity_values=SENSITIVITY_VALUES_TWO_VALUE,
func=add_5_to_values,
result_name=RESULT_NAME
)
sa_config.update(**kwargs)
sa = SensitivityAnalyzer(**sa_config)
return sa
def test_create(self):
sa = self.create_sa()
def test_create_df(self):
sa = self.create_sa()
assert_frame_equal(sa.df, EXPECT_DF_TWO_VALUE, check_dtype=False)
def test_create_df_three_values(self):
sa = self.create_sa(
sensitivity_values=SENSITIVITY_VALUES_THREE_VALUE,
func=add_10_to_values,
)
assert_frame_equal(sa.df, EXPECT_DF_THREE_VALUE, check_dtype=False)
def test_create_styled_dfs(self):
sa = self.create_sa()
result = sa.styled_dfs()
assert_styled_matches(result)
def test_create_styled_dfs_with_num_fmt(self):
sa = self.create_sa(num_fmt='${:,.0f}')
result = sa.styled_dfs()
sa2 = self.create_sa()
result2 = sa2.styled_dfs(num_fmt='${:,.0f}')
assert_styled_matches(result, DF_STYLED_NUM_FMT_PATH)
assert_styled_matches(result2, DF_STYLED_NUM_FMT_PATH)
def test_create_styled_dfs_with_labels(self):
sa = self.create_sa(labels=TWO_VALUE_LABELS)
result = sa.styled_dfs()
assert_styled_matches(result, DF_LABELED_PATH)
def test_create_styled_dfs_three_values(self):
sa = self.create_sa(
sensitivity_values=SENSITIVITY_VALUES_THREE_VALUE,
func=add_10_to_values,
)
result = sa.styled_dfs()
def test_create_plot(self):
sa = self.create_sa()
result = sa.plot()
assert_graph_matches(result)
def test_create_plot_three_values(self):
sa = self.create_sa(
sensitivity_values=SENSITIVITY_VALUES_THREE_VALUE,
func=add_10_to_values,
)
result = sa.plot()
assert_graph_matches(result, file_path=PLOT_THREE_PATH)
def test_create_plot_with_options(self):
options = dict(
grid_size=2, color_map='viridis', reverse_colors=True
)
sa = self.create_sa(labels=TWO_VALUE_LABELS, **options)
result = sa.plot()
assert_graph_matches(result, file_path=PLOT_OPTIONS_PATH)
sa = self.create_sa(labels=TWO_VALUE_LABELS)
result = sa.plot(**options)
assert_graph_matches(result, file_path=PLOT_OPTIONS_PATH)
|
[
"[email protected]"
] | |
1c68371a7e2d8eaddb197d4d63eff1c8935ef143
|
5c8346597e3690eec3939f56f233eb5fafd336bc
|
/varsom_regobs_client/models/snow_temp_view_model.py
|
761a19ec81381882d6deee0093d85ef0c634d216
|
[] |
no_license
|
NVE/python-varsom-regobs-client
|
be44befd04ca07058f8b46ec69bf1659d3ee422b
|
8bb7fc06d2f6da36a5fa4a475d4f036ebe3cfd72
|
refs/heads/master
| 2022-12-27T19:09:54.761318 | 2020-06-24T08:56:15 | 2020-06-24T08:56:15 | 274,619,205 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,254 |
py
|
# coding: utf-8
"""
RegObs API
## Introduction RegObs is a tool for collecting observations and events related to natural hazards. It is currently used by the Norwegian flood, landslide and avalanche warning service in Norway, but the data is openly available for anyone through this API. Regobs has been developed by the Norwegian Water resources and Energy Directorate (NVE), in collaboration with the Norwegian Meteorological Institute (MET) and the Norwegian Public Roads Administration (NPRA). You can check out our representation of the data at [regobs.no](http://regobs.no). ## Authentication Some endpoints require an api key. You can get an API key by sending an email to [[email protected]](mailto:[email protected]?subject=RegObs%20API%20Key). To use the api key with the swagger ui, fill in the api\\_key input above. It should then be included with every request in the `regObs_apptoken` header. ## Getting started Get the last 10 observations using python: ```python import requests r = requests.post('https://api.regobs.no/v4/Search', data={'NumberOfRecords': 10}, headers={'Content-Type': 'application/json'} ) data = r.json() print(len(data)) # 10 ``` # noqa: E501
OpenAPI spec version: v4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SnowTempViewModel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'layers': 'list[SnowTempObsViewModel]'
}
attribute_map = {
'layers': 'Layers'
}
def __init__(self, layers=None): # noqa: E501
"""SnowTempViewModel - a model defined in Swagger""" # noqa: E501
self._layers = None
self.discriminator = None
if layers is not None:
self.layers = layers
@property
def layers(self):
"""Gets the layers of this SnowTempViewModel. # noqa: E501
:return: The layers of this SnowTempViewModel. # noqa: E501
:rtype: list[SnowTempObsViewModel]
"""
return self._layers
@layers.setter
def layers(self, layers):
"""Sets the layers of this SnowTempViewModel.
:param layers: The layers of this SnowTempViewModel. # noqa: E501
:type: list[SnowTempObsViewModel]
"""
self._layers = layers
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SnowTempViewModel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SnowTempViewModel):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
d92f30b3e758222776245aa95fcb11704d4d7d8b
|
9272584f18cdc8450713b2376fef966934f3fd3e
|
/starblock/starblock.pyde
|
c8bf8e37339c6b6f39cc330a7547603edd25f169
|
[] |
no_license
|
FranciscoPython/TamashiVR
|
ff949ad610ba5f2f870ab9438a2fd89d85079ae0
|
403461bea9f0cff785308089ca2ad69be927697b
|
refs/heads/master
| 2020-12-22T15:26:54.250975 | 2020-03-02T13:14:20 | 2020-03-02T13:14:20 | 236,840,838 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,105 |
pyde
|
key_mode = 1
padpos1 = 0
x = 10
def frame():
fill(145)
rect (0, 0, 500, 500)
def Innerframe():
fill(45)
rect (10, 10, 480, 480)
padpos= -10
def paddle():
fill(255)
circle(padpos , 460 , 20)
circle(padpos + 20 , 460, 20)
rect(padpos, 450 , 20 , 20)
def setup():
size(500,500)
background(0,0,0)
x=0
y=30
fill (99)
rect(x,y,width, height)
fill (8, 3, 61)
rect (x+10,y+10,width-20, height-50)
for i in range(10):
for j in range(8):
Blue = random(0, 10)
Red = random (2,255)
Green = random (0,10)
fill (Red, Green, Blue)
rect (x+10+ i*48, y+10+j*20, 48, 20)
def draw():
"""
if frameCount%120 == 0:
stroke(8, 3, 61)
fill (8, 3, 61, 80)
rect (10,200,width-20, height-50)
stroke(255, 255, 0, )
fill( 255, 255, 255, 0)
for i in range(250):
Xpos = random(15, 485)
Ypos = random (202,485)
Size = random (.1,.5)
circle( Xpos, Ypos, Size)
if frameCount%360 ==0:
stroke(8, 3, 61)
fill (8, 3, 61, 50)
rect (10,200,width-20, height-50)
"""
stroke(8, 3, 61)
fill (8, 3, 61, 1)
rect (10,200,width-20, height-50)
if frameCount%2 == 0:
stroke(255, 255, 0, )
fill( 255, 255, 255, 0)
Xpos = random(15, 485)
Ypos = random (202,485)
Size = random (.1,.5)
circle(Xpos, Ypos, Size)
stroke(8, 3, 61)
fill (8, 3, 61)
Xpos = random(10, 465)
Ypos = random (200,465)
rect (Xpos, Ypos, 25, 25)
if frameCount%5 == 0:
for i in range(10):
for j in range(8):
Blue = random(0, 10)
Red = random (2,255)
Green = random (0,10)
fill (Red, Green, Blue, 40)
rect (10+ i*48, 40+j*20, 48, 20)
global padpos1
global key_mode
if keyPressed:
key_mode = 1
if mousePressed:
key_mode = 0
if key_mode == 0:
padpos1 = mouseX
if key_mode == 1:
if keyPressed:
if keyCode == LEFT:
padpos1 = padpos1 - 10
if keyCode == RIGHT:
padpos1 = padpos1 + 10
if padpos1 >= 470:
padpos1 = 470
if padpos1 <= 30:
padpos1 = 30
pushMatrix()
translate(padpos1,0)
paddle()
popMatrix()
"""
for j in range(100):
positionX = map(random(0,1), 0, 1, 15, 385)
positionY = map(random(0,1), 0, 1, 200, 385)
for i in range(8):
stroke(255, 255, 0)
strokeWeight(1)
x = 5 * cos(2*PI/8 * i)
y = 5 * sin(2*PI/8 * i)
line ( 200, 200, 200+ x, 200+y)
"""
|
[
"[email protected]"
] | |
a36ae4917549a017d3b949b570e8c4b0577ac04a
|
0f8f6d9ddee615ecd4d54c5c849c9660e1dd3c69
|
/SimpleAIassistant.py
|
cc948040a39195f57aab1f90a354ef22f57438fe
|
[] |
no_license
|
fairy186/SimpleAIassistant
|
a1e9d2553785fb5758133297499b23716afa8beb
|
1bd513b1960d6e5df220e08982ac6ebf3c10bd6d
|
refs/heads/main
| 2023-08-25T09:38:39.081754 | 2021-11-09T09:28:19 | 2021-11-09T09:28:19 | 426,166,840 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,242 |
py
|
import speech_recognition
import pyttsx3
import os
import pygame
from datetime import date, datetime
light = 0
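# Keyword-driven voice assistant loop: listen on the microphone, match phrases in the
# recognized text, answer with text-to-speech, and draw a pygame lamp animation for the
# light on/off commands.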
while True:
robot_Listen = speech_recognition.Recognizer()
robot_speak = pyttsx3.init()
voices = robot_speak.getProperty('voices')
robot_speak.setProperty('voice', voices[1].id)
with speech_recognition.Microphone() as mic:
print("Robot: I'm Listening")
robot_Listen.adjust_for_ambient_noise(mic, duration = 1)
audio = robot_Listen.listen(mic)
print("...")
try:
you = robot_Listen.recognize_google(audio)
except:
you = ""
print("You: " + you)
if you == "":
robot = "I cann't hear you, try again"
elif "hello" in you:
robot = "Hello Huy"
elif "time" in you:
gio = datetime.now()
robot = gio.strftime("%H hours %M minutes")
elif "today" in you:
td = date.today()
robot = td.strftime("%B %d, %Y")
elif "turn on" in you:
if light == 0:
light = 1
robot = "the lights were on"
pygame.init()
screen = pygame.display.set_mode((600,600))
running = True
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
i = 0
while running:
screen.fill(BLACK)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if (i < 100):
pygame.draw.rect(screen, WHITE, (200, 300, 200, 200))
pygame.draw.circle(screen, WHITE, (300, 300), 150)
else:
pygame.draw.rect(screen, (255,245,75), (200, 300, 200, 200))
pygame.draw.circle(screen, (255,245,75), (300, 300), 150)
pygame.draw.line(screen, RED, (300, 125), (300, 25), 4)
pygame.draw.line(screen, RED, (475, 300), (575, 300), 4)
pygame.draw.line(screen, RED, (125, 300), (25, 300), 4)
pygame.draw.line(screen, RED, (175, 175), (100, 100), 4)
pygame.draw.line(screen, RED, (475, 175), (575, 75), 4)
i = i+1
if (i==200):
running = False
pygame.time.wait(10)
pygame.display.update()
pygame.quit()
else:
robot = "can not turn on the light because it is on."
elif "turn of" in you:
if light == 1:
light = 0
pygame.init()
screen = pygame.display.set_mode((600,600))
running = True
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
i = 0
while running:
screen.fill(BLACK)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if (i > 100):
pygame.draw.rect(screen, WHITE, (200, 300, 200, 200))
pygame.draw.circle(screen, WHITE, (300, 300), 150)
else:
pygame.draw.rect(screen, (255,245,75), (200, 300, 200, 200))
pygame.draw.circle(screen, (255,245,75), (300, 300), 150)
pygame.draw.line(screen, RED, (300, 125), (300, 25), 4)
pygame.draw.line(screen, RED, (475, 300), (575, 300), 4)
pygame.draw.line(screen, RED, (125, 300), (25, 300), 4)
pygame.draw.line(screen, RED, (175, 175), (100, 100), 4)
pygame.draw.line(screen, RED, (475, 175), (575, 75), 4)
i = i+1
if (i==200):
running = False
pygame.time.wait(10)
pygame.display.update()
pygame.quit()
robot = "the lights were off"
else:
robot = "can not turn off the light because it is off."
elif "note" in you:
robot = "opening notepad"
print ("Robot: " + robot)
print ("")
robot_speak.say(robot)
robot_speak.runAndWait()
os.system("Notepad")
break
elif "game" in you:
robot = "opening Honkai Impact 3"
print ("Robot: " + robot)
print ("")
robot_speak.say(robot)
robot_speak.runAndWait()
os.system('D:\\"Honkai Impact 3"\\falcon_os.exe')
break
elif "browser" in you:
robot = "opening Edge"
print ("Robot: " + robot)
print ("")
robot_speak.say(robot)
robot_speak.runAndWait()
os.system('C:\\"Program Files (x86)"\\Microsoft\\Edge\\Application\\msedge.exe')
break
elif "music" in you:
robot = "opening music"
print ("Robot: " + robot)
print ("")
robot_speak.say(robot)
robot_speak.runAndWait()
os.system('D:\\HT\\Python\\Remenber Me.mp3')
break
elif "bye" in you:
robot = "bye"
print ("Robot: " + robot)
print ("")
robot_speak.say(robot)
robot_speak.runAndWait()
break
else:
robot = "Sorry, I don't understand, try again"
print ("Robot: " + robot)
print ("")
robot_speak.say(robot)
robot_speak.runAndWait()
|
[
"[email protected]"
] | |
feaca30d209710ef59254b4a7a876cbbc712270f
|
016cf414259dccd8e45856ef0cd131cf27f66fec
|
/datapreprocessing/file_to_wav.py
|
a443a9b3b87dee5ea6ad204e9024e6261fc3732e
|
[] |
no_license
|
steinszzh/2020capstone
|
acccd69924ccaf3de77907808422f049631408ac
|
95d223f15ffbd39af2d79532ee0ed73613b4a399
|
refs/heads/master
| 2023-02-03T05:53:06.444073 | 2020-12-21T12:51:01 | 2020-12-21T12:51:01 | 288,187,353 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 898 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 20:44:10 2020
@author: zhihongz
"""
import os
from pydub import AudioSegment
def convert_to_wav(dir_path):
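# Convert every non-.wav audio file in dir_path to .wav in place; each original
# file is removed after pydub has read it.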
for file_path in os.listdir(dir_path):
if file_path.split('.')[-1] != "wav":
read_file = AudioSegment.from_file(os.path.join(dir_path,file_path), file_path.split('.')[-1])
os.remove(os.path.join(dir_path,file_path))
base_name = file_path.split('.')[:-1]
# read_file = read_file.set_channels(8)
# base_name = ".".join(base_name)
read_file.export(os.path.join(dir_path,f"{base_name[0]}.wav"), format="wav")
if __name__ == '__main__':
dir_path = './dev-clean/2078/142845'  # folder name
all_files = os.listdir(dir_path)  # get all filenames
conv = convert_to_wav(dir_path)
|
[
"[email protected]"
] | |
12713c973883df5102ea1444ec040bcba5a07233
|
17fc3ebe9bb956dcc00388da6a11d979664c7e10
|
/sklearn_03/stu_and_demo/demo_02.py
|
9d2f1b4b9517a1bcdddb058c52078f2477dc5d2f
|
[] |
no_license
|
jmsxiaoli/holiday_stu_2019
|
054c63cb7bc365418cbb50e563d02e1435446eac
|
5297ae2552562dad55f1fe9debc75f97b4648346
|
refs/heads/master
| 2023-04-04T10:28:55.382556 | 2020-04-27T13:09:48 | 2020-04-27T13:09:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,959 |
py
|
'''
Example of evaluation metrics for a linear regression model
'''
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
"""
# 利用 diabetes数据集来学习线性回归
# diabetes 是一个关于糖尿病的数据集, 该数据集包括442个病人的生理数据及一年以后的病情发展情况。
# 数据集中的特征值总共10项, 如下:
# 性别
#体质指数
#血压
#s1,s2,s3,s4,s4,s6 (六种血清的化验数据)
#但请注意,以上的数据是经过特殊处理, 10个数据中的每个都做了均值中心化处理,然后又用标准差乘以个体数量调整了数值范围。
#验证就会发现任何一列的所有数值平方和为1.
"""
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
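# A quick sanity check (not part of the original demo) of the normalization described
# above: every feature column of the scaled diabetes data has unit sum of squares.
print(np.allclose((diabetes.data ** 2).sum(axis=0), 1.0))  # expected: True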
# Use only one feature
# Add a new axis to get the body mass index values as a column array [[1], [2], ... [442]]
diabetesX = diabetes.data[:, np.newaxis, 2]
#print(X)
# Split the data into training/testing sets
X_train = diabetesX[0:-20]
X_test = diabetesX[-20:]
# Split the targets into training/testing sets
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
# Create linear regression object
lr = linear_model.LinearRegression()
# Train the model using the training sets
lr.fit(X_train, y_train)
# Make predictions using the testing set
y_pred = lr.predict(X_test)
# The coefficients
# Inspect the fitted regression coefficients
print(lr.coef_)
# The mean squared error
# Mean squared error
# Look at the mean of the squared residuals (mean squared error, MSE)
print(mean_squared_error(y_test, y_pred))
# Explained variance score: 1 is perfect prediction
# R2, the coefficient of determination (goodness of fit)
# The better the model: r2 → 1
# The worse the model: r2 → 0
print(r2_score(y_test, y_pred))
# Plot outputs
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
|
[
"[email protected]"
] | |
931c1ee3b6dbce31cf3bb4a6cef5c091784387b4
|
0c29b00e47acbb316dbc6d107b5f415af6be33f9
|
/Windenergy_prediction/accounts/models.py
|
e30ccaf60d7bdab5c00d0881a45c5221aae69bea
|
[] |
no_license
|
ganesh12450/Predicting-the-energy-output-of-the-wind-turbines-based-on-weather-conditon
|
638763e20d5d5e8123fdac049387e0f189527c2a
|
e239af4d48da656bcd25e2e8c0af527b184f2a0c
|
refs/heads/master
| 2022-11-21T16:05:20.315890 | 2020-07-15T09:57:35 | 2020-07-15T09:57:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
from django.db import models
# Create your models here.
class user_details(models.Model):
Name = models.CharField(max_length=30)
email = models.CharField(max_length=30)
username = models.CharField(max_length=30)
|
[
"[email protected]"
] | |
51cd6deb2f42faf59d3c7e5bf876fa0321501eb3
|
9407b2b21f4696ff5b392e7ac8eb0b421dd415a5
|
/downloader/models.py
|
b98779828989e32c4a03d5540b3b9c4df0cd4b3e
|
[] |
no_license
|
Craeckie/otrtools
|
15f06bd037a608334904cd032aaa803a02cab254
|
3abad47bfe126e47c9cc1256263b2297bd1dc56e
|
refs/heads/master
| 2023-02-22T07:41:13.267355 | 2022-08-11T21:23:13 | 2022-08-11T21:23:13 | 249,022,513 | 1 | 0 | null | 2023-02-15T18:50:49 | 2020-03-21T17:06:23 |
Python
|
UTF-8
|
Python
| false | false | 411 |
py
|
from django.db import models
class Task(models.Model):
video_url = models.URLField(max_length=1000)
audio_url = models.URLField(max_length=1000, null=True)
otrkey = models.CharField(max_length=200)
decrypted = models.CharField(max_length=200)
cutlist = models.CharField(max_length=5000, null=True)
log = models.CharField(max_length=50000)
keep = models.BooleanField(default=False)
|
[
"[email protected]"
] | |
3c851c00f3168cf06f90684e89022ab2bc3965e0
|
c9697437c292df7fefd68559fdd9636066bdb2f1
|
/dev/animations/quick_sph_harm_anim.py
|
70d6bba7b23d2c08505d1efe4f8e75ea2ef961bf
|
[] |
no_license
|
JoshKarpel/ionization
|
ebdb387483a9bc3fdb52818ab8e897e562ffcc67
|
3056df523ee90147d262b0e8bfaaef6f2678ea11
|
refs/heads/master
| 2021-03-24T13:03:57.469388 | 2020-04-06T03:37:04 | 2020-04-06T03:37:04 | 62,348,115 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,491 |
py
|
import logging
import os
from copy import deepcopy
import simulacra as si
from simulacra.units import *
import ionization as ion
# NOTE: 'animation.animators' is used below but never imported in the original file;
# the import below is an assumed module path and may need adjusting for your install.
from ionization import animation
import matplotlib.pyplot as plt
FILE_NAME = os.path.splitext(os.path.basename(__file__))[0]
OUT_DIR = os.path.join(os.getcwd(), "out", FILE_NAME)
if __name__ == "__main__":
with si.utils.LogManager(
"simulacra", "ionization", stdout_logs=True, stdout_level=logging.DEBUG
) as logger:
anim_kwargs = dict(length=10, target_dir=OUT_DIR)
epot_axman = animation.animators.ElectricPotentialPlotAxis(
show_electric_field=True,
show_vector_potential=False,
show_y_label=False,
show_ticks_right=True,
)
test_state_axman = animation.animators.TestStateStackplotAxis(
states=tuple(
ion.HydrogenBoundState(n, l) for n in range(5) for l in range(n)
)[:8]
)
wavefunction_axman = animation.animators.WavefunctionStackplotAxis(
states=(
ion.HydrogenBoundState(1, 0),
ion.HydrogenBoundState(2, 0),
ion.HydrogenBoundState(3, 1),
)
)
animators = [
animation.animators.PolarAnimator(
postfix="g2",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
shading="flat"
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(test_state_axman),
axman_colorbar=animation.animators.ColorBarAxis(),
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(test_state_axman),
axman_colorbar=None,
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g_angmom",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=animation.animators.AngularMomentumDecompositionAxis(
maximum_l=10
),
axman_colorbar=None,
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g_wavefunction",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(wavefunction_axman),
axman_colorbar=None,
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g_wavefunction_again",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(wavefunction_axman),
axman_colorbar=None,
**anim_kwargs,
),
animation.animators.PolarAnimator(
postfix="g_wavefunction_again_hires",
axman_wavefunction=animation.animators.SphericalHarmonicPhiSliceMeshAxis(
which="g",
colormap=plt.get_cmap("richardson"),
norm=si.vis.RichardsonNormalization(),
shading="flat",
),
axman_lower_right=deepcopy(epot_axman),
axman_upper_right=deepcopy(wavefunction_axman),
axman_colorbar=None,
fig_dpi_scale=2,
**anim_kwargs,
),
]
sim = ion.SphericalHarmonicSpecification(
"sph_harm",
time_initial=0 * asec,
time_final=100 * asec,
r_bound=50 * bohr_radius,
l_bound=20,
r_points=200,
electric_potential=ion.potentials.Rectangle(
start_time=25 * asec,
end_time=75 * asec,
amplitude=1 * atomic_electric_field,
),
# test_states = (ion.HydrogenBoundState(n, l) for n in range(5) for l in range(n)),
use_numeric_eigenstates=True,
numeric_eigenstate_max_energy=10 * eV,
numeric_eigenstate_max_angular_momentum=5,
animators=animators,
).to_sim()
sim.info().log()
sim.run()
sim.info().log()
|
[
"[email protected]"
] | |
ac6a3c014150e6d5977baa99c4a14e69a2f65419
|
f4f2be5885ce0cf7647c856cf202e63163009d5b
|
/University_Lecture/05week_HW_02.py
|
640a0b5f1a76a876af0432b3f886c3ed0b661471
|
[] |
no_license
|
epsilon-d/Python
|
9488198dfff7be446ab99a784b7404fc27cd74d9
|
27a5298a0ba33aa8c63ff9bd6d372ac30be3df04
|
refs/heads/master
| 2022-06-02T09:07:50.988547 | 2022-05-26T18:27:42 | 2022-05-26T18:27:42 | 209,555,591 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 197 |
py
|
# Exercise #2: sum of odd numbers
sum_num = 0
for x in range(501, 1000, 1):
if x % 2 == 0:
continue
sum_num = sum_num + x
print("500에서 1000까지 홀수의 합: %d" % sum_num)
|
[
"[email protected]"
] | |
71713664da5f286f35226c63cd9d2b695c3b5d4a
|
cee2d75869ffbe682eade7233f4bca24033acaff
|
/mysite/settings.py
|
87268b6945f7a15f1659d33d709f9cd039eae0fc
|
[] |
no_license
|
WTSR8888/djangoblog
|
d0fbcc4015859c0aedd19706ee2a2fac76d37ca5
|
9b17c267bc1476526beb14dae5d76fb75da6e503
|
refs/heads/master
| 2020-04-27T02:30:30.419479 | 2019-03-05T18:04:43 | 2019-03-05T18:04:43 | 173,996,585 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,190 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ac-bv-e+8_g*&v4-+w@bb$q1f9_xya@s3+io*!a4hej_e7r%+-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
|
[
"[email protected]"
] | |
ba5bb8dfdc1f2e5859c708ef09ebf2485d187b2f
|
cb7dd5c6f33d0e09321cee2a7030ab275e8b541e
|
/unittests/test_orderbook.py
|
74ad06589bfaea69d1abc5009659fbf95b71d952
|
[] |
no_license
|
JakimPL/Iceberg-Order-Book
|
c2e6d473feb5ddb1aa0ee6efaf37eaeab1cb0ea8
|
a6f89198ca102bc377d8b04f06685dc3a0f94479
|
refs/heads/master
| 2023-03-19T13:59:56.581249 | 2021-03-17T23:36:02 | 2021-03-17T23:36:02 | 347,716,932 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,255 |
py
|
import unittest
from modules.order import Order
from modules.orderbook import OrderBook
class TestOrderBook(unittest.TestCase):
def test_order_book_add_order(self):
order_book = OrderBook()
order = Order((1, "Limit", "Buy", 100, 100, 0))
order_book.add(order)
self.assertEqual(order_book.get_state(),
'{"buyOrders": [{"id": 1, "price": 100, "quantity": 100}], "sellOrders": []}')
def test_order_book_cancel_order(self):
order_book = OrderBook()
order = Order((1, "Limit", "Buy", 100, 100, 0))
order_book.add(order)
order_book.cancel(order)
self.assertEqual(order_book.get_state(), '{"buyOrders": [], "sellOrders": []}')
def test_order_book_cancel_not_present_order(self):
order_book = OrderBook()
order = Order((1, "Limit", "Buy", 100, 100, 0))
order_book.add(order)
order_book.cancel(order)
self.assertRaises(ValueError, order_book.cancel, order)
def test_order_book_not_storing_transactions(self):
order_book = OrderBook(store_transactions=False)
order_book.add(Order((1, "Limit", "Buy", 100, 100, 0)))
self.assertEqual(order_book.last_transactions, [])
order_book.add(Order((2, "Limit", "Sell", 100, 100, 0)))
self.assertEqual(order_book.last_transactions, [])
self.assertEqual(order_book.get_state(), '{"buyOrders": [], "sellOrders": []}')
def test_order_book_limit_transactions(self):
order_book = OrderBook(store_transactions=True)
order_book.add(Order((1, "Limit", "Buy", 100, 100, 0)))
self.assertEqual(order_book.last_transactions, [])
order_book.add(Order((2, "Limit", "Sell", 80, 50, 0)))
self.assertEqual(order_book.last_transactions,
['{"buyOrderId": 1, "sellOrderId": 2, "price": 100, "quantity": 50}'])
order_book.add(Order((3, "Limit", "Sell", 120, 40, 0)))
self.assertEqual(order_book.last_transactions, [])
self.assertEqual(order_book.get_state(),
'{"buyOrders": [{"id": 1, "price": 100, "quantity": 50}],'
' "sellOrders": [{"id": 3, "price": 120, "quantity": 40}]}')
def test_order_book_iceberg_transactions(self):
order_book = OrderBook(store_transactions=True)
orders = [
'{"type": "Iceberg", "order": {"direction": "Sell", "id": 1, "price": 100, "quantity": 200, "peak": 100}}',
'{"type": "Iceberg", "order": {"direction": "Sell", "id": 2, "price": 100, "quantity": 300, "peak": 100}}',
'{"type": "Iceberg", "order": {"direction": "Sell", "id": 3, "price": 100, "quantity": 200, "peak": 100}}',
'{"type": "Iceberg", "order": {"direction": "Buy", "id": 4, "price": 100, "quantity": 500, "peak": 100}}'
]
expected_states = [
'{"buyOrders": [], "sellOrders": [{"id": 1, "price": 100, "quantity": 100}]}',
'{"buyOrders": [], "sellOrders":'
' [{"id": 1, "price": 100, "quantity": 100}, {"id": 2, "price": 100, "quantity": 100}]}',
'{"buyOrders": [], "sellOrders":'
' [{"id": 1, "price": 100, "quantity": 100}, {"id": 2, "price": 100, "quantity": 100},'
' {"id": 3, "price": 100, "quantity": 100}]}',
'{"buyOrders": [], "sellOrders":'
' [{"id": 3, "price": 100, "quantity": 100}, {"id": 2, "price": 100, "quantity": 100}]}'
]
expected_transactions = [
[], [], [],
['{"buyOrderId": 4, "sellOrderId": 1, "price": 100, "quantity": 100}',
'{"buyOrderId": 4, "sellOrderId": 2, "price": 100, "quantity": 100}',
'{"buyOrderId": 4, "sellOrderId": 3, "price": 100, "quantity": 100}',
'{"buyOrderId": 4, "sellOrderId": 1, "price": 100, "quantity": 100}',
'{"buyOrderId": 4, "sellOrderId": 2, "price": 100, "quantity": 100}']
]
for i in range(4):
order_book.add(Order(orders[i]))
self.assertEqual(order_book.get_state(), expected_states[i])
self.assertEqual(order_book.last_transactions, expected_transactions[i])
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
a2f2c2365fbdeb8aa7369aa3fe1d11c10b4e1f3b
|
3dbb88524b16e9a58310f612829380c2152eb54a
|
/mysite/mainApp/migrations/0003_registration2.py
|
47fef75bf3f6b60f5cffe5470a516f2fae1ba4cf
|
[] |
no_license
|
Amir-error/Django_ib_1kurs
|
ccc826896e86cffeeb332df8ba5ea4ae13e528f5
|
e710a14c420fd6c45dba636aa076894483fb0543
|
refs/heads/master
| 2023-04-15T19:22:56.111218 | 2021-05-06T18:08:13 | 2021-05-06T18:08:13 | 341,322,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,083 |
py
|
# Generated by Django 3.1.4 on 2021-01-31 09:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainApp', '0002_registration_password'),
]
operations = [
migrations.CreateModel(
name='Registration2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='Имя')),
('surname', models.CharField(max_length=20, verbose_name='Фамилия')),
('password', models.CharField(default='admin', max_length=15, verbose_name='Пароль')),
('age', models.IntegerField(verbose_name='Возраст')),
('date', models.DateTimeField(verbose_name='Дата регистрации')),
],
options={
'verbose_name': 'Регистрация2',
'verbose_name_plural': 'Регистрации2',
},
),
]
|
[
"[email protected]"
] | |
1f182e8a36647ee5821729ce8dbb868c606bb379
|
928d800fb38dbc9aeab9e1e4536c559154ca9a40
|
/www/handlers.py
|
0becb2850c49492136de9e63fafd80ffec916a32
|
[] |
no_license
|
weilei0209/awesome-python3-webapp
|
d4664b7cbba8359cc93f24ca6a7f42cc73998bc1
|
3d5b1427d73aa4a0a35238cf02b823a064e556a7
|
refs/heads/master
| 2021-01-22T13:51:42.943883 | 2017-08-30T08:34:37 | 2017-08-30T08:34:37 | 100,683,959 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 88 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Wei Lei'
' url handlers '
|
[
"[email protected]"
] | |
7bbd45dc9c290c1a74e2526119f7f5cc401db529
|
874abdd97c48329a10e13845fe75bbb18dbfd650
|
/stocks.py
|
3e863b6654d85f3459bf1f0f72a4721b2fdb4bd5
|
[] |
no_license
|
JakeSigwart/Stock_Dataset
|
ff732cf268bb9b138168947eb0b3ae50d52bec81
|
972b82f80d835785c9682b29b695d3823f3122db
|
refs/heads/master
| 2021-05-03T22:55:15.202175 | 2018-02-06T03:42:33 | 2018-02-06T03:42:33 | 120,394,265 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,045 |
py
|
import os
import time
import pickle
import numpy as np
import pandas as pd
import datetime as dt
from Stock_dataset import *
path = os.path.dirname(__file__)
todays_date = str(dt.date.today())
tickers = ['AAPL', 'AMZN', 'NVDA', 'GM', 'T', 'CAH']
#sp_500_tickers = np.load(path + '\\data\\tickers.npy')
dataset = Stock_dataset(tickers, path+'\\data\\data.pkl', path+'\\data\\dates.pkl', path+'\\data\\proc.npy')
#dataset.quandl_api_key("YOUR API KEY HERE")
data, dates = dataset.fetch_data('2017-01-01', '2017-07-01')
dataset.save_new_data(data, dates, True)
numeric_data, _ = dataset.organize_data_from_vars(data, dates)
proc_data, processed_data_stock, processed_data_dates, combined_dates = dataset.process_data(numeric_data, dates, False)
#processed_data, dates = dataset.update_data(todays_date) #Un-comment this and comment the above 4 lines after processing first data fetch
num_dates = len(dates)
print(tickers)
print('Data metrics for date: ' + str(dates[num_dates-1]))
print(proc_data[num_dates-1])
|
[
"[email protected]"
] | |
cbccee82c1301e6805ef9d0e03b6ec7445ab82da
|
fc2289618d93309b824d44a61310f096ef37b257
|
/npsTracker/tracker/migrations/0005_auto_20181207_2149.py
|
91b6ed4c3e371cc0bbf8845b08181693dc584cdb
|
[] |
no_license
|
jonna-t/npsTracker
|
50929583a0cd0c3b876f4f6cc1eb34868503b99a
|
f33bdecbfdee0a5374eb0c57789da626a1aa3801
|
refs/heads/master
| 2020-04-18T23:00:43.916947 | 2019-01-27T13:15:30 | 2019-01-27T13:15:30 | 167,810,545 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 661 |
py
|
# Generated by Django 2.1.3 on 2018-12-07 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracker', '0004_auto_20181207_2147'),
]
operations = [
migrations.AlterField(
model_name='event',
name='status',
field=models.IntegerField(choices=[('1', '1'), ('2', '2'), ('3', '3')], default='1'),
),
migrations.AlterField(
model_name='event',
name='type',
field=models.CharField(choices=[('PROBLEM', 'Problem'), ('INCIDENT', 'Incident')], default='Problem', max_length=10),
),
]
|
[
"[email protected]"
] | |
f37e0c03f9c5faea225fd44758e373c763d12d40
|
450e32d0a75f05340dc7c3660b01efbd6f2b15e6
|
/log_process/x.py
|
34932c1cb0fa196a175aff4605779e535b8eabb6
|
[
"MIT"
] |
permissive
|
skyzh/fish-agent-simulation-mcm2020
|
679ee29632b8ab2bb5e903c30e4c0f502c4a73aa
|
dfe5b3c0b85ff738df3446cb87610e85136f857e
|
refs/heads/master
| 2022-10-04T02:00:46.554228 | 2020-04-06T02:47:21 | 2020-04-06T02:47:21 | 240,444,153 | 3 | 0 |
MIT
| 2020-06-07T20:53:32 | 2020-02-14T06:37:03 |
Rust
|
UTF-8
|
Python
| false | false | 108 |
py
|
while True:
x = []
for j in range(12):
x.append(float(input()))
print(sum(x) / 12)
|
[
"[email protected]"
] | |
6bfea2a30adb4e10c535fc96d2806b7810ad2881
|
6995b59905175bab4307c6bca548aba25463c06a
|
/app/routes.py
|
c8f1d9c7f4d6db2ccae658367c8c94cf0aa20790
|
[] |
no_license
|
CataSt11/Automation-index
|
9ac06664b264df925b173828765a6404b648de5d
|
2be2268d8633d4ac75d4c87c05015074214988da
|
refs/heads/master
| 2023-05-11T12:17:01.843419 | 2020-11-08T14:07:47 | 2020-11-08T14:07:47 | 309,156,926 | 0 | 0 | null | 2023-05-01T21:52:15 | 2020-11-01T18:05:01 |
Python
|
UTF-8
|
Python
| false | false | 22,561 |
py
|
from app import app
from flask import render_template, make_response, jsonify, request, session
from app.functions import *
import datetime
import calendar
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
data = {}
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
data["errors"] = str(e)
return render_template('index.j2', title='Home', data=data, session=session.get("_flashes"))
data["db_name"] = "automationdb"
data["content"] = "route /"
data["method"] = request.args.get("page")
if data["method"] == "departments":
data["page"] = "type1"
else:
data["page"] = "type2"
return render_template('index.j2', title='Home', data=data, session=session.get("_flashes"))
@app.route('/departments')
def departments():
data = {}
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
data["errors"] = str(e)
return render_template('index.j2', title='Home', data=data, session=session.get("_flashes"))
mycursor = im.mycursor
data["db_name"] = "automationdb"
sql_query = \
"""
SELECT * FROM departments
"""
mycursor.execute(sql_query)
data["departments"] = mycursor.fetchall()
return render_template('departments.j2', title='Departments', data=data, )
@app.route('/reports')
def reports():
data = {}
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
data["errors"] = str(e)
return render_template('index.j2', title='Home', data=data, session=session.get("_flashes"))
mycursor = im.mycursor
data["db_name"] = "automationdb"
data["time_saved"] = []
sql_query = \
"""
SELECT MIN(tasks_executions.timestamp) AS min_date FROM tasks_executions
"""
mycursor.execute(sql_query)
results = mycursor.fetchall()
cur_year = datetime.datetime.now().year
if results[0]["min_date"] is None:
min_year = cur_year
else:
min_year = results[0]["min_date"].year
for year in range(min_year, cur_year + 1):
max_month = 12
if year == cur_year:
max_month = datetime.datetime.now().month
for month in range(1, max_month + 1):
first_day = 1
last_day = calendar.monthrange(year,month)[1]
sql_query = \
f"""
SELECT sum(tasks.time_of_completion) AS time_saved
FROM tasks_executions
JOIN tasks
ON tasks.id=tasks_executions.task_id
JOIN connections_tasks_automations
ON tasks.id=connections_tasks_automations.task_id
WHERE tasks_executions.timestamp >= '{year}-{str(month).zfill(2)}-{str(first_day).zfill(2)}'
AND tasks_executions.timestamp <= '{year}-{str(month).zfill(2)}-{str(last_day).zfill(2)}'
"""
mycursor.execute(sql_query)
results = mycursor.fetchall()
if results[0]["time_saved"] is None:
time_saved = 0
else:
time_saved = int(results[0]["time_saved"])
data["time_saved"].append({"year":str(year),"month":str(month).zfill(2), "time_saved":time_saved})
return render_template('reports.j2', title='reports', data=data)
@app.route('/workflows')
def workflows():
data = {}
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
data["errors"] = str(e)
return render_template('index.j2', title='Home', data=data, session=session.get("_flashes"))
mycursor = im.mycursor
data["db_name"] = "automationdb"
sql_query = \
"""
SELECT * FROM departments
"""
mycursor.execute(sql_query)
data["departments"] = mycursor.fetchall()
sql_query = \
"""
select workflows.id as workflow_id,
workflows.name as workflow_name,
workflows.description as workflow_description
from workflows
ORDER BY workflows.name
"""
mycursor.execute(sql_query)
results = mycursor.fetchall()
data["workflows"] = {}
for item in results:
if data["workflows"].get(item["workflow_id"]) is None:
data["workflows"][item["workflow_id"]] = {
"workflow_name": item["workflow_name"],
"workflow_description": item["workflow_description"],
}
sql_query = \
f"""
SELECT departments.id, departments.name
FROM connections_workflows_departments
JOIN departments
ON connections_workflows_departments.department_id = departments.id
WHERE connections_workflows_departments.workflow_id = {item['workflow_id']}
ORDER BY departments.name
"""
mycursor.execute(sql_query)
results2 = mycursor.fetchall()
if data["workflows"][item["workflow_id"]].get("departments") is None:
data["workflows"][item["workflow_id"]]["departments"] = {}
for task_id in results2:
data["workflows"][item["workflow_id"]]["departments"][task_id["id"]] = task_id["name"]
data["workflows"][item["workflow_id"]]["tasks"] = {}
sql_query = \
f"""
SELECT tasks.*,
connections_tasks_automations.automation_tool_id,
automation_tools.name as automation_tool_name
FROM
tasks left join connections_tasks_automations
on tasks.id= connections_tasks_automations.task_id
left join automation_tools
on connections_tasks_automations.automation_tool_id = automation_tools.id
WHERE
tasks.workflow_id = {item['workflow_id']} AND
tasks.visibility = 'enabled'
ORDER BY tasks.order_number
"""
mycursor.execute(sql_query)
results3 = mycursor.fetchall()
for item3 in results3:
data["workflows"][item["workflow_id"]]["tasks"][item3["id"]] = {
"name": item3["name"],
"time_of_completion": item3["time_of_completion"],
"order_number": item3["order_number"],
"automation_tool_id": item3["automation_tool_id"],
"automation_tool_name": item3["automation_tool_name"],
}
sql_query = \
"""
SELECT * FROM automation_tools
"""
mycursor.execute(sql_query)
results = mycursor.fetchall()
data["automation_tools"] = results
return render_template('workflows.j2', title='workflows', data=data)
@app.route('/automation-tools')
def automation_tools():
data = {}
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
data["errors"] = str(e)
return render_template('index.j2', title='Home', data=data, session=session.get("_flashes"))
mycursor = im.mycursor
data["db_name"] = "automationdb"
sql_query = \
"""
SELECT * FROM automation_tools
"""
mycursor.execute(sql_query)
results = mycursor.fetchall()
data["automation-tools"] = []
for item in results:
data["automation-tools"].append(item["name"])
return render_template('automation-tools.j2', title='automation-tools', data=data)
@app.route('/database/departments', methods=["POST", "PATCH", "DELETE"])
def database_delete_department():
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
response = {"message": f"Could not connect to database server.\n{str(e)}", "code": "FAILURE"}
return make_response(jsonify(response), 400)
mycursor = im.mycursor
mydb = im.conn
payload = request.get_json()
if request.method == "DELETE":
if payload.get("department_id") is None:
response = {"message": "department_id is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = "DELETE FROM departments WHERE id=%s LIMIT 1"
try:
mycursor.execute(query, (payload['department_id'], ))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
if request.method == "PATCH":
if payload.get("department_id") is None or payload.get("department_name") is None:
response = {"message": "department_id or department_name is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = "UPDATE departments SET name=%s WHERE id=%s LIMIT 1"
try:
mycursor.execute(query, (payload['department_name'], payload['department_id'], ))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
if request.method == "POST":
if payload.get("department") is None:
response = {"message": "department is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = "SELECT * FROM departments WHERE name=%s"
try:
mycursor.execute(query, (payload['department'], ))
results = mycursor.fetchall()
except mysql.connector.errors.ProgrammingError:
response = {"message": f"SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
if len(results) != 0:
response = {"message": f"Could not insert department {payload['department']} into database because it already exists.", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = "INSERT INTO departments(`id`, `name`) VALUES (null, %s)"
try:
mycursor.execute(query, (payload['department'], ))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
response = {'message': 'Department inserted into database', 'code': 'SUCCESS'}
return make_response(jsonify(response), 201)
@app.route('/database/workflows', methods=["POST", "PATCH", "DELETE"])
def database_workflows():
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
response = {"message": f"Could not connect to database server.\n{str(e)}", "code": "FAILURE"}
return make_response(jsonify(response), 400)
mycursor = im.mycursor
mydb = im.conn
payload = request.get_json()
if request.method == "POST":
for parameter in ["name"]:
if parameter not in payload.keys():
response = {"message": f"'{parameter}' is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = "INSERT INTO workflows VALUES(null, %s, '')"
try:
mycursor.execute(query, (payload['name'], ))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
if request.method == "PATCH":
for parameter in ["id", "name"]:
if parameter not in payload.keys():
response = {"message": f"'{parameter}' is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = "UPDATE workflows SET name=%s WHERE id=%s LIMIT 1"
try:
mycursor.execute(query, (payload['name'], int(payload['id']), ))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
if request.method == "DELETE":
for parameter in ["id"]:
if parameter not in payload.keys():
response = {"message": f"'{parameter}' is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = "DELETE FROM workflows WHERE id=%s LIMIT 1"
try:
mycursor.execute(query, (int(payload['id']), ))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
@app.route('/database/tasks', methods=["POST", "PATCH", "DELETE"])
def database_tasks():
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
response = {"message": f"Could not connect to database server.\n{str(e)}", "code": "FAILURE"}
return make_response(jsonify(response), 400)
mycursor = im.mycursor
mydb = im.conn
payload = request.get_json()
if request.method == "DELETE":
if payload.get("id") is None:
response = {"message": "department_id is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = "UPDATE tasks SET visibility='disabled' WHERE id=%s LIMIT 1"
try:
mycursor.execute(query, (payload['id'], ))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
if request.method == "POST":
if payload.get("name") is None:
response = {"message": "name is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
if payload.get("workflow_id") is None:
response = {"message": "workflow_id is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
# calculate the order_number
query = "SELECT max(order_number) AS max_order_number FROM tasks WHERE workflow_id = %s"
try:
mycursor.execute(query, (payload['workflow_id'],))
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
results = mycursor.fetchall()
if results[0]["max_order_number"] is None:
order_number = 1
else:
order_number = results[0]["max_order_number"] + 1
query = "INSERT INTO tasks VALUES(null, %s, %s, 'enabled', %s, 0)"
try:
mycursor.execute(query, (int(payload['workflow_id']), payload['name'], order_number))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
if request.method == "PATCH":
for idx,elem in enumerate(payload):
for item in ["id", "name", "time_of_completion", "automation_tool_id"]:
if item not in elem.keys():
response = {"message": f"'{item}' is not given on row {idx}", "code": "FAILURE"}
return make_response(jsonify(response), 400)
for idx,elem in enumerate(payload):
query = "UPDATE tasks SET name=%s, visibility='enabled', order_number=%s, time_of_completion=%s WHERE id=%s LIMIT 1"
try:
mycursor.execute(query, (elem['name'], int(idx+1), int(elem['time_of_completion']), int(elem['id']), ))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {query}\n{mycursor.statement}")
return make_response(jsonify(response), 400)
query = "SELECT * FROM connections_tasks_automations WHERE task_id=%s"
try:
mycursor.execute(query, (int(elem['id']), ))
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {query}\n{mycursor.statement}")
return make_response(jsonify(response), 400)
results = mycursor.fetchall()
            # a single delete path handles the case where the tool association was cleared
            if len(results) != 0 and len(str(elem["automation_tool_id"])) == 0:
                query = "DELETE FROM connections_tasks_automations WHERE task_id=%s LIMIT 1"
                try:
                    mycursor.execute(query, (int(elem["id"]), ))
                    mydb.commit()
                except mysql.connector.errors.ProgrammingError:
                    response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
                    print(f"Query failed: {query}\n{mycursor.statement}")
                    return make_response(jsonify(response), 400)
if len(results) == 0 and elem["automation_tool_id"] is not None and len(str(elem["automation_tool_id"])) != 0:
query = "INSERT INTO connections_tasks_automations VALUES(null, %s, %s)"
try:
mycursor.execute(query, (int(elem["id"]), elem["automation_tool_id"],))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {query}\n{mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
@app.route('/database/workflows-associations', methods=["POST", "DELETE"])
def database_workflows_associations():
try:
im = init_mysql()
except mysql.connector.errors.ProgrammingError as e:
response = {"message": f"Could not connect to database server.\n{str(e)}", "code": "FAILURE"}
return make_response(jsonify(response), 400)
mycursor = im.mycursor
mydb = im.conn
payload = request.get_json()
print(payload)
if request.method == "POST":
for parameter in ["workflow_id", "department_id"]:
            if parameter not in payload.keys():
response = {"message": f"'{parameter}' is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
for parameter in ["workflow_id", "department_id"]:
if payload[parameter] is None:
response = {"message": f"'{parameter}' must be not None", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = " INSERT INTO connections_workflows_departments VALUES(null, %s, %s)"
try:
mycursor.execute(query, (int(payload["workflow_id"]), int(payload["department_id"]),))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {query}\n{mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
if request.method == "DELETE":
for parameter in ["workflow_id", "department_id"]:
if parameter not in payload.keys():
response = {"message": f"'{parameter}' is not given", "code": "FAILURE"}
return make_response(jsonify(response), 400)
query = " DELETE FROM connections_workflows_departments WHERE workflow_id=%s AND department_id=%s LIMIT 1"
try:
mycursor.execute(query, (int(payload["workflow_id"]), int(payload["department_id"]),))
mydb.commit()
except mysql.connector.errors.ProgrammingError:
response = {"message": "SQL query resulted in an error.", "code": "FAILURE"}
print(f"Query failed: {query}\n{mycursor.statement}")
return make_response(jsonify(response), 400)
response = {"message": "", "code": "SUCCESS"}
return make_response(jsonify(response), 200)
@app.route('/database/automation-tools-associations', methods=["DELETE"])
def database_delete_automation_tools_associations():
pass
|
[
"[email protected]"
] | |
0497e0262a8ee739513125f73d20dec716f79060
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/cylicRot_20200714234806.py
|
755b17fab1acf221b7f045ba530fc306bc41432f
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 536 |
py
|
# given an array rotate it k times to the right
def rotate(arr,k):
# first I'd rotate the array once
# so how do we rotate the array
    # we move the last element to the first place and
# the rest follow suit
# [1,2,3,4]
# [4,2,3,1]
# [4,1,3,2]
# [4,1,2,3]
# [4,1,2,3]
# all we are doing is swapping the elements
    n = len(arr)
    # repeat the single right-rotation pass k times
    for _ in range(k % n if n else 0):
        last = n - 1
        for i in range(last):
            arr[i], arr[last] = arr[last], arr[i]
    print(arr)
rotate([1,2,3,4],4)
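# Quick sanity checks added as a sketch (assuming "rotate it k times to the right"
# means a plain right rotation of the list):
rotate([1, 2, 3, 4], 1)  # expected to print [4, 1, 2, 3]
rotate([1, 2, 3, 4], 2)  # expected to print [3, 4, 1, 2]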
|
[
"[email protected]"
] | |
884da089c1ad2da26bab7d0e24816593e1a9b0a6
|
7f155be68a153c93321e33d1ceaf0e3c43443b66
|
/batchHobj2Tiff.py
|
569e05ab3bc83a4ec57dead019cb08e89d4b307d
|
[] |
no_license
|
EthanHC21/OMA-M
|
eff328a710f27fd62e27b3a303055ba6517f2ac6
|
6a83fb7ef3f5f22cfe4a6be3a0f791b04169424c
|
refs/heads/master
| 2023-08-14T00:00:00.266298 | 2021-09-21T01:45:06 | 2021-09-21T01:45:06 | 354,226,788 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,205 |
py
|
import numpy as np
import cv2
import struct, time, os
def read_hobj(image_filename):
start_time = time.time()
print('Getting HOBJ file >>> {:s}'.format(image_filename))
with open(image_filename,'rb') as f:
ibuffer = f.read(84)
#
npixels = struct.unpack('i',ibuffer[72:76][::-1])[0]
rows = struct.unpack('h',ibuffer[80:82][::-1])[0]+1
cols = struct.unpack('h',ibuffer[82:84][::-1])[0]+1
#
print('Image width/height is {:d}/{:d}.'.format(cols, rows))
print('# of pixels is {:d}.'.format(npixels))
#
f.seek(84+rows*6+17)
image_str = f.read(npixels*2)
bit_format = '<{:d}{:s}'.format(npixels, 'h')
image_array = struct.unpack(bit_format, image_str)
print('Read time: {:.2f} seconds.'.format(time.time() - start_time))
return np.array(image_array).reshape(rows , cols)
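# Example usage (a sketch only; the path below is hypothetical):
#   raw = read_hobj(r'D:\path\to\frame_0001.hobj')
#   print(raw.shape)  # (rows, cols) as parsed from the header above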
# Path to .hobj images
imgFolder = r'D:\Documents\School Documents\2020-2021 Senior Year College\Research\Data\20029J1\HOBJ'
# Path to save converted .tiff files
saveDir = r'D:\Documents\School Documents\2020-2021 Senior Year College\Research\Data\20029J1\Tiff'
# Path to save scaled .tiff files
sclSaveDir = r'D:\Documents\School Documents\2020-2021 Senior Year College\Research\Data\20029J1\Scaled'
for fileName in os.listdir(imgFolder):
# get the path to the image
imgPath = os.path.join(imgFolder, fileName)
# read the hobj as a numpy array
imgArr = read_hobj(imgPath)
# scale the image array so we can see it
sclImgArr = imgArr - np.min(imgArr)
sclImgArr = (sclImgArr * float(np.iinfo(np.uint16).max) / np.max(sclImgArr)).astype(np.uint16)
# convert the array to uint16 for tiff purposes
imgArr = imgArr.astype(np.uint16)
# debayer it into a color image (RGGB CFA)
# imgArr = cv2.cvtColor(imgArr, cv2.COLOR_BayerRG2RGB)
sclImgArr = cv2.cvtColor(sclImgArr, cv2.COLOR_BayerRG2RGB)
# remove hobj from the end of the file (leaving the .)
fileNameNoExt = fileName[0:(len(fileName) - 4)]
# add the tiff extension
fileNameTiff = fileNameNoExt + 'tiff'
# write the files
cv2.imwrite(os.path.join(saveDir, fileNameTiff), imgArr)
cv2.imwrite(os.path.join(sclSaveDir, fileNameTiff), sclImgArr)
|
[
"[email protected]"
] | |
6fd9bb6dae2bb50a8a9fbac2eb6d9c75a6b3da23
|
17b22d94c938bddafd4420424997a5b82afca6f9
|
/hw3.py
|
5681a5988ca075e7baa42656fd9a02b0070d78bf
|
[] |
no_license
|
amikoz/HW3-Kozenasheva151
|
42623a21c5a7c6f2522f15034d834b9c9073eaed
|
a4ab011d2cb18843bb7551cdbb829a8ed33bc53a
|
refs/heads/master
| 2021-06-08T21:12:15.162713 | 2016-12-04T21:15:16 | 2016-12-04T21:15:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,574 |
py
|
import re
import urllib.request
import os
import html
def download_page1(pageUrl1):
try:
page1 = urllib.request.urlopen(pageUrl1)
text1 = page1.read().decode('UTF-8')
except:
text1 = 'unavailable page'
return text1
def txt_1(text1):
regPostTitletxt1 = re.compile(' <b class="regnum_title">REGNUM</b></span>(.*?)</div>', flags=re.DOTALL)
t1 = regPostTitletxt1.findall(text1)
if t1:
txt_1 = t1
new_text1 = []
regTag1 = re.compile('<.*?>', flags=re.DOTALL)
regSpace1 = re.compile('\s{2,}', flags=re.DOTALL)
for finaltext1 in txt_1:
clean_t1 = regSpace1.sub("", finaltext1)
clean_t = regTag1.sub("", clean_t1)
new_text1.append(clean_t)
for finaltext1 in new_text1:
finaltext1.replace(" →»—«–", " ")
if finaltext1:
txt_1= html.unescape(finaltext1)
else:
txt_2 = 'no text'
return txt_1
def func1(txt_1):
n = txt_1.lower()
n2 = n.replace(',', '')
n1 = n2.replace('.', '')
n0 = n1.replace('»', '')
n3 = n0.replace('«', '')
n4 = n3.replace('-', '')
n5 = n4.replace('\n', '')
n6 = n5.replace(':', '')
n7 = re.sub(u"[0-9]{1,}", " ", n6)
m1 = n7.split(" ")
A = set(m1)
return A
def download_page2(pageUrl2):
try:
page2 = urllib.request.urlopen(pageUrl2)
text2 = page2.read().decode('UTF-8')
except:
text2 = 'unavailable page'
return text2
def txt_2(text2):
regPostTitletxt2 = re.compile('<div itemprop="articleBody">(.*?)<div data-type="Incut. By wide" class="b-read-more b-read-more_wide">', flags=re.DOTALL)
t2 = regPostTitletxt2.findall(text2)
if t2:
txt_2= t2
new_text2 = []
regTag2 = re.compile('<.*?>', flags=re.DOTALL)
regSpace2 = re.compile('\s{2,}', flags=re.DOTALL)
for finaltext2 in txt_2:
clean_t2 = regSpace2.sub("", finaltext2)
clean_t2 = regTag2.sub("", clean_t2)
new_text2.append(clean_t2)
for finaltext2 in new_text2:
finaltext2.replace(" →»—«–", " ")
if finaltext2:
txt_2 = html.unescape(finaltext2)
else:
txt_2 = 'no text'
return txt_2
def func2(txt_2):
n2 = txt_2.lower()
n22 = n2.replace(',', '')
n12 = n22.replace('.', '')
n02 = n12.replace('»', '')
n32 = n02.replace('«', '')
n42 = n32.replace('-', '')
n52 = n42.replace('\n', '')
n62 = n52.replace(':', '')
n72 = re.sub(u"[0-9]{1,}", " ", n62)
m2 = n72.split(" ")
B = set(m2)
return B
def download_page3(pageUrl3):
try:
page3 = urllib.request.urlopen(pageUrl3)
text3 = page3.read().decode('UTF-8')
except:
text3 = 'unavailable page'
return text3
def txt_3(text3):
regPostTitletxt3 = re.compile('<div class="b-text clearfix js-topic__text" itemprop="articleBody">(.*?)<aside class="b-inline-topics-box b-box_floated b-inline-topics-box_wide b-box_left">', flags=re.DOTALL)
t3 = regPostTitletxt3.findall(text3)
if t3:
txt_3 = t3
new_text3 = []
regTag3 = re.compile('<.*?>', flags=re.DOTALL)
regSpace3 = re.compile('\s{2,}', flags=re.DOTALL)
for finaltext3 in txt_3:
clean_t3 = regSpace3.sub("", finaltext3)
clean_t3 = regTag3.sub("", clean_t3)
new_text3.append(clean_t3)
for finaltext3 in new_text3:
finaltext3.replace(" →»—«–", " ")
if finaltext3:
txt_3 = html.unescape(finaltext3)
else:
txt_3 = 'no text'
return txt_3
def func3(txt_3):
n3 = txt_3.lower()
n23 = n3.replace(',', '')
n13 = n23.replace('.', '')
n03 = n13.replace('»', '')
n33 = n03.replace('«', '')
n43 = n33.replace('-', '')
n53 = n43.replace('\n', '')
n63 = n53.replace(':', '')
n73 = re.sub(u"[0-9]{1,}", " ", n63)
m3 = n73.split(" ")
C = set(m3)
return C
def download_page4(pageUrl4):
try:
page4 = urllib.request.urlopen(pageUrl4)
text4 = page4.read().decode('UTF-8')
except:
text4 = 'unavailable page'
return text4
def txt_4(text4):
regPostTitletxt4 = re.compile('<p class="lid">(.*?)<p><div class="article__incut">', flags=re.DOTALL)
t4 = regPostTitletxt4.findall(text4)
if t4:
txt_4 = t4
new_text4 = []
regTag4 = re.compile('<.*?>', flags=re.DOTALL)
regSpace4 = re.compile('\s{2,}', flags=re.DOTALL)
for finaltext4 in txt_4:
clean_t4 = regSpace4.sub("", finaltext4)
clean_t4 = regTag4.sub("", clean_t4)
new_text4.append(clean_t4)
for finaltext4 in new_text4:
finaltext4.replace(" →»—«–", " ")
if finaltext4:
txt_4 = html.unescape(finaltext4)
else:
txt_4 = 'no text'
return txt_4
def func4(txt_4):
n4 = txt_4.lower()
n24 = n4.replace(',', '')
n14 = n24.replace('.', '')
n04 = n14.replace('»', '')
n34 = n04.replace('«', '')
n44 = n34.replace('-', '')
n54 = n44.replace('\n', '')
n64 = n54.replace(':', '')
n74 = re.sub(u"[0-9]{1,}", " ", n64)
m4 = n74.split(" ")
D = set(m4)
return D
def intersec(A, B, C, D):
inter1 = A.intersection(B)
inter2 = inter1.intersection(C)
inter = inter2.intersection(D)
    print('Intersection of the sets: ', inter)
def symmdif(A, B, C, D):
sd1 = A.symmetric_difference(B)
sd2 = sd1.symmetric_difference(C)
sd = sd2.symmetric_difference(D)
    print('Symmetric difference of the sets: ', sd)
def main():
pageUrl1 = 'https://regnum.ru/news/innovatio/2211264.html'
text1 = download_page1(pageUrl1)
g1 = txt_1(text1)
b1 = func1(g1)
pageUrl2 = 'https://rg.ru/2016/11/29/na-marse-obnaruzhen-labirint.html'
text2 = download_page2(pageUrl2)
g2 = txt_2(text2)
b2 = func2(g2)
pageUrl3 = 'https://lenta.ru/news/2016/11/29/mars/'
text3 = download_page3(pageUrl3)
g3 = txt_3(text3)
b3 = func3(g3)
pageUrl4 = 'http://www.mk.ru/science/2016/11/29/tainstvennyy-labirint-na-marse-privlek-vnimanie-planetologov.html'
text4 = download_page4(pageUrl4)
g4 = txt_4(text4)
b4 = func4(g4)
intersec(b1, b2, b3, b4)
symmdif(b1, b2, b3, b4)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
792d8426759a5c85f9af9fba9e0fdfa56c425d1f
|
681ad82b3c7f18411f83a4be2c190a7cd123ce8a
|
/EDBRCommon/python/datasets/cmgTupleList_XWW/cmgTuple_0627/cmgTuple_SingleElectron_Run2012D_PromptReco_xww_cff.py
|
b4b251384f77aa7ba8b6b17ce20739ec6b976a18
|
[] |
no_license
|
cms-edbr/ExoDiBosonResonances
|
5009161fdc76b39f121316e26497bedd29abe3d7
|
b8ae400a20bfb8ed66c83b8f38e98d853058ae17
|
refs/heads/master
| 2021-01-19T18:33:17.435519 | 2014-03-12T12:00:43 | 2014-03-12T12:00:43 | 12,613,661 | 0 | 0 | null | 2015-10-18T15:06:39 | 2013-09-05T09:06:16 |
Python
|
UTF-8
|
Python
| false | false | 28,612 |
py
|
import FWCore.ParameterSet.Config as cms
cmgFiles = cms.untracked.vstring()
source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = cmgFiles
)
cmgFiles.extend([
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_0.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_1.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_10.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_100.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_101.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_102.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_103.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_104.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_105.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_106.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_107.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_108.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_109.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_11.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_110.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_111.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_112.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_113.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_114.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_115.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_116.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_117.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_118.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_119.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_12.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_120.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_121.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_122.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_123.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_124.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_125.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_126.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_127.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_128.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_129.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_13.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_130.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_131.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_132.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_133.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_134.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_135.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_136.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_137.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_138.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_139.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_14.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_140.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_141.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_142.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_143.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_144.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_145.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_146.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_147.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_148.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_149.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_15.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_150.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_151.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_152.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_153.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_154.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_155.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_156.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_157.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_158.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_159.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_16.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_160.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_161.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_162.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_163.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_164.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_165.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_166.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_167.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_168.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_169.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_17.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_170.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_171.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_172.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_173.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_174.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_175.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_176.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_177.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_178.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_179.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_18.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_180.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_181.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_182.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_183.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_184.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_185.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_186.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_187.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_188.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_189.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_19.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_190.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_191.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_192.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_193.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_194.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_195.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_196.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_197.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_198.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_199.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_2.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_20.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_200.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_201.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_202.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_203.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_204.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_205.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_206.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_207.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_208.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_209.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_21.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_210.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_211.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_212.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_213.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_214.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_22.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_23.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_24.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_25.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_26.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_27.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_28.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_29.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_3.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_30.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_31.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_32.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_33.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_34.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_35.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_36.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_37.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_38.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_39.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_4.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_40.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_41.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_42.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_43.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_44.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_45.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_46.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_47.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_48.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_49.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_5.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_50.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_51.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_52.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_53.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_54.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_55.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_56.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_57.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_58.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_59.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_6.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_60.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_61.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_62.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_63.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_64.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_65.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_66.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_67.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_68.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_69.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_7.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_70.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_71.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_72.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_73.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_74.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_75.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_76.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_77.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_78.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_79.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_8.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_80.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_81.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_82.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_83.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_84.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_85.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_86.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_87.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_88.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_89.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_9.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_90.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_91.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_92.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_93.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_94.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_95.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_96.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_97.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_98.root',
'/store/cmst3/group/exovv/CMGtuple/shuai/production0627/Run2012/CA8//SingleElectron_Run2012D_PromptReco_xww/cmgTuple_99.root',
])
|
[
""
] | |
1cf3376248e4dae4f7b043f2554ed9f25c909d80
|
7bbe2060385e1d732bb125b881562c64618075e0
|
/Python/GaeaPipeline/workflow/H_merge_vcf.py
|
cae0f2ee23a1dd7871002045f737b5dd34f951a4
|
[] |
no_license
|
jjmini/dmcade
|
7071cefd451946de14ee3817568b8d7c6ce5bf7e
|
8c621126c3b18e4d4f0daed27fe77db35e16fd31
|
refs/heads/master
| 2022-12-08T13:55:53.355357 | 2019-07-07T06:09:33 | 2019-07-07T06:09:33 | 195,595,960 | 0 | 0 | null | 2022-07-07T23:10:18 | 2019-07-07T00:58:48 |
Java
|
UTF-8
|
Python
| false | false | 4,257 |
py
|
# encoding: utf-8
import os
from gaeautils.bundle import bundle
from gaeautils.workflow import Workflow
__updated__ = '2018-05-31'
class merge_vcf(Workflow):
""" merge_vcf """
INIT = bundle(merge_vcf=bundle())
INIT.merge_vcf.program = "gaeatools.jar"
INIT.merge_vcf.bcftools = ""
INIT.merge_vcf.bcftools_param = "-t"
INIT.merge_vcf.parameter = ""
INIT.merge_vcf.uploadvcf = False
INIT.merge_vcf.check_param = ""
INIT.merge_vcf.bed_list = ""
def run(self, impl, dependList):
impl.log.info("step: merge_vcf!")
inputInfo = self.results[dependList[0]].output
result = bundle(output=bundle(), script=bundle())
if 'bed_list' in self.file:
self.merge_vcf.bed_list = self.expath('file.bed_list')
# extend program path
self.merge_vcf.program = self.expath('merge_vcf.program')
self.merge_vcf.bed_list = self.expath('merge_vcf.bed_list')
self.merge_vcf.bcftools = self.expath('merge_vcf.bcftools', False)
# global param
hadoop_parameter = ''
if self.hadoop.get('queue'):
hadoop_parameter += ' -D mapreduce.job.queuename={} '.format(self.hadoop.queue)
ParamDict = {
"PROGRAM": "%s jar %s" % (self.hadoop.bin, self.merge_vcf.program),
"HADOOPPARAM": hadoop_parameter
}
JobParamList = []
for sampleName in inputInfo:
scriptsdir = impl.mkdir(self.gaeaScriptsDir, sampleName)
outputPath = impl.mkdir(self.option.workdir, "variation", sampleName)
result.output[sampleName] = os.path.join(outputPath, "{}.hc.vcf.gz".format(sampleName))
upload_tmp = os.path.join(self.option.dirHDFS, sampleName, 'vcf_tmp')
# global param
JobParamList.append({
"SAMPLE": sampleName,
"SCRDIR": scriptsdir,
"UPLOAD_TMP": upload_tmp,
"DATALIST": os.path.join(scriptsdir, 'vcf_data.list'),
"VCF_TMP": inputInfo[sampleName]['vcf'],
"VCF": result.output[sampleName]
})
if self.merge_vcf.uploadvcf:
vcf_suffix = ".hc.vcf.gz"
dataParam = []
with open(self.merge_vcf.bed_list, 'r') as beds:
for bed in beds:
basename = '{}{}'.format(os.path.splitext(os.path.basename(bed))[0], vcf_suffix)
dataParam.append({
"KEY": os.path.join(inputInfo[sampleName]['vcf'], basename)
})
impl.write_file(
fileName='vcf_data.list',
scriptsdir=scriptsdir,
commands=["${KEY}"],
JobParamList=dataParam)
cmd = ["source %s/bin/activate" % self.GAEA_HOME,
'check_hc_part.py -b %s -p ${VCF_TMP} %s' % (self.merge_vcf.bed_list, self.merge_vcf.check_param),
'if [ $? != 0 ]\nthen',
'\texit 1',
'fi'
]
if self.merge_vcf.uploadvcf:
cmd.extend([
'%s ${UPLOAD_TMP}' % self.fs_cmd.delete,
'${PROGRAM} GzUploader -i ${DATALIST} -o ${UPLOAD_TMP} -l',
'if [ $? != 0 ]\nthen',
'\texit 1',
'fi',
'wait',
'${PROGRAM} SortVcf ${HADOOPPARAM} -R 400 -p /tmp/partitionFiles/vcfsort/reducer400_partitons.lst '
'-input ${UPLOAD_TMP} -output file://${VCF}\n',
])
else:
cmd.extend([
'rm ${VCF_TMP}/*tbi',
'wait',
'${PROGRAM} SortVcf ${HADOOPPARAM} -input file://${VCF_TMP} -output file://${VCF}\n'
])
if self.merge_vcf.bcftools:
cmd.append("%s index %s ${VCF}" % (self.merge_vcf.bcftools, self.merge_vcf.bcftools_param))
# write script
scriptPath = \
impl.write_scripts(
name='merge_vcf',
commands=cmd,
JobParamList=JobParamList,
paramDict=ParamDict)
# result
result.script.update(scriptPath)
return result
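# --- Illustrative addition (not part of the original pipeline) ---------------
# The commands built above contain ${KEY}-style placeholders that are filled in
# from ParamDict (shared values) and JobParamList (per-sample values) when the
# scripts are written out. The helper below is only a hedged sketch of that
# substitution idea using string.Template; it is NOT the gaeautils API, and the
# names in the example call are hypothetical.
from string import Template

def _render_commands_sketch(commands, param_dict, job_param_list):
    """Expand ${...} placeholders: shared values first, then per-job overrides."""
    rendered = []
    for job in job_param_list:
        values = dict(param_dict)   # e.g. PROGRAM, HADOOPPARAM
        values.update(job)          # e.g. SAMPLE, VCF_TMP, VCF
        rendered.append('\n'.join(Template(c).safe_substitute(values) for c in commands))
    return rendered

# Example with made-up paths:
# _render_commands_sketch(
#     ['${PROGRAM} SortVcf -input file://${VCF_TMP} -output file://${VCF}'],
#     {'PROGRAM': 'hadoop jar gaeatools.jar'},
#     [{'VCF_TMP': '/tmp/sampleA/vcf', 'VCF': '/out/sampleA.hc.vcf.gz'}])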
|
[
"https://[email protected]"
] |
https://[email protected]
|
710bdfba83d3af4084ef5374be28722fb9f47424
|
05d884fccb0d8d46024f8df8d4d93d8911c9e9d5
|
/Deep_VAMP/ConvRecL/test_fullimage/make_predictions_fullimage.py
|
b2678a7ecd7d292ea53c3ddf223b16f03b697435
|
[] |
no_license
|
havaeimo/Deep_VAMP
|
6305c5d1a9a642d7ef11daeb5e8240e72e165ff8
|
041c1b5c8da762ef5e087d07e75302f86e4fa541
|
refs/heads/master
| 2021-01-10T11:35:47.367156 | 2015-07-14T23:37:23 | 2015-07-14T23:37:23 | 36,247,348 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,124 |
py
|
import pdb
import numpy as np
#from matplotlib.pyplot import imsave
import numpy
from os.path import isfile, join
import argparse
from itertools import izip
import scipy
import cPickle
import theano
import os
import os.path
from PIL import Image
import PIL
from pylearn2.datasets.deep_vamp import toronto_preprocessing
def makepatches(image):
idx = 0
height,width = image.shape[:-1]
patches = np.zeros(((height-input_shape[0])*(width-input_shape[1]),input_shape[0],input_shape[1],3),dtype=np.float32)
for i in range(height-input_shape[0]):
for j in range(width-input_shape[1]):
patches[idx,...] = image[i:i+input_shape[0],j:j+input_shape[1],:]
idx += 1
assert patches.shape[0] == (height-input_shape[0])*(width-input_shape[1])
return patches
def generate_prediction_patchwise(data,fprop,batch_size=100):
results = []
for image in data:
height,width = image.shape[:-1]
image_patches = makepatches(image)
result_patches = generate_prediction(image_patches,fprop,batch_size=100)
result_patches = np.array(result_patches).reshape(len(result_patches),2)
result_image = result_patches.reshape(height-input_shape[0],width-input_shape[1],2)
results.append(result_image)
return results
def prepare_batch(batch,axes,batch_size=100):
if axes == ('c',0,1,'b'):
batch = batch.swapaxes(0, 3).copy()
num_samples = batch.shape[3]
if num_samples < batch_size:
buffer_batch = np.zeros((batch.shape[0],batch.shape[1],batch.shape[2],batch_size),dtype=np.float32)
buffer_batch[:,:,:,0:num_samples] = batch
batch = buffer_batch
elif axes == ('b',0,1,'c'):
num_samples = batch.shape[0]
if num_samples < batch_size:
buffer_batch = np.zeros((batch_size,batch.shape[1],batch.shape[2],batch.shape[3]),dtype=np.float32)
buffer_batch[0:num_samples,:,:,:] = batch
batch = buffer_batch
return (batch,num_samples)
def generate_prediction(data,fprop,batch_size=100):
axes = model.input_space.axes
batches = int(numpy.ceil(data.shape[0] / float(batch_size)))
results = []
for b in xrange(batches):
batch = data[b * batch_size:(b + 1) * batch_size]
#batch = batch.swapaxes(0, 3).copy()
batch,num_samples = prepare_batch(batch,axes,batch_size=100)
#pdb.set_trace()
results_batch = fprop(batch)
if num_samples < batch_size:
results_batch = results_batch[0:num_samples,...]
results.extend(results_batch)
return results
def load_dataset(path_testset):
dir_list = [f for f in os.listdir(path_testset) if isfile(join(path_testset,f)) and ('.jpg' in f or '.png' in f) and '_gt' not in f] #CHANGE THIS LINE ACCORDING TO THE DATASET FILE NAMES
rng = np.random.RandomState(seed=1234)
rng.shuffle(dir_list)
from PIL import Image
images = []
names = []
for f in dir_list[:100]:
img = Image.open(join(path_testset,f))
img = img.resize((74,138),PIL.Image.ANTIALIAS) # the resize shape is (width,height)
img_npy = np.array(img,dtype='float32')
#img_npy = img_npy.flatten()
names.append(f)
images.append(img_npy)
images = np.array(images)
return (images,names)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate the DICE score for a BrainSet')
parser.add_argument('model', type=argparse.FileType('r'),
help='A serialized pylearn2 model.')
#####parser.add_argument('testset', type=str,
# help='The path to test images.'),
#parser.add_argument('patch_shape', type=int,
# help='The size of the input patch window.'),
#parser.add_argument('label_patch_shape', type=int,
# help='The size of the predicted patch window.'),
#parser.add_argument('num_channels', type=int,
# help='Number of channels in the dataset.'),
args = parser.parse_args()
#path_testset = self.testset
path_testset = '/home/local/USHERBROOKE/havm2701/data/Data/Deep_VAMP/INRIA/Test/FramesPos'
result_path = '/home/local/USHERBROOKE/havm2701/git.repos/Deep_VAMP/Deep_VAMP/ConvRecL/test_fullimage/test_results2/'
if not os.path.exists(result_path):
os.makedirs(result_path)
model = cPickle.load(args.model)
del model.layers[-1]
#model.layers[0].input_space.shape = (240,320)
#model.layers[0].desired_space.shape = (240, 320)
pdb.set_trace()
X = model.get_input_space().make_theano_batch()
fprop = theano.function([X], model.fprop(X))
input_shape = model.input_space.shape
#theano.printing.debugprint(f)
#fprop_input_shape = model.get_input_space().shape
testdata,name_testdata = load_dataset(path_testset)
#testdata = testdata[0]
#name_testdata = testdata[1]
testdata = testdata[:500,...]
name_testdata = name_testdata[:500]
#if os.path.exists(fname):
# print fname + ' exists already. skipping'
# continue
testdata = toronto_preprocessing(testdata)
#pdb.set_trace()
prediction = generate_prediction(testdata, fprop)
#prediction = generate_prediction_patchwise(testdata,fprop)
ii = 0
for name, test_image in izip(name_testdata,testdata):
        # NOTE: in the original, prob_map was never assigned (the patchwise call was
        # commented out), so prob_map[0] below raised a NameError. Restoring the
        # per-image call -- wrapped in a list so the helper iterates over one image --
        # is an assumed fix matching the commented-out intent.
        prob_map = generate_prediction_patchwise([test_image], fprop)
        prob_map = prob_map[0]
pos_map = prob_map[...,1]
#neg_map = prob_map[...,0]
pos_name = join(result_path,name+'_pos.png')
#neg_name = join(result_path,name+'_neg.png')
image_name = join(result_path,name)
scipy.misc.imsave(pos_name, pos_map)
#scipy.misc.imsave(neg_name, neg_map)
scipy.misc.imsave(image_name, test_image)
print name+'>> '+str(ii+1)+' of '+str(len(testdata))
ii+=1
#fhandle = open(fname, 'wb+')
#numpy.save(fhandle, prediction)
#fhandle.close()
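    # Hedged note on the patch geometry used by makepatches() above: for an H x W
    # image and an h x w model input it emits one patch per top-left corner, i.e.
    # (H - h) * (W - w) patches. The input_shape below is an assumption purely for
    # illustration; the real value comes from model.input_space.shape.
    _H, _W = 138, 74                    # resized height/width from load_dataset()
    _h, _w = 128, 64                    # hypothetical model input patch size
    print (_H - _h) * (_W - _w)         # -> 100 patches per image in this example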
|
[
"[email protected]"
] | |
e0bf0736a15032cf463a5b606c455b4e65f89479
|
d2b5e5039b2fb0556ebaadf68c491b7f35cadc32
|
/django_temporal/utils.py
|
5991936b1a8cab12e981c2a89ba43edb10fe2f4e
|
[
"BSD-3-Clause"
] |
permissive
|
hwinkel/django_temporal
|
65d4bac57bbd8ffc92789b8260524ece7f957b74
|
c192a4f2bd052d14ae43795962e2451be22985d2
|
refs/heads/master
| 2021-01-17T12:22:43.948709 | 2014-10-24T21:24:49 | 2014-10-24T21:24:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,107 |
py
|
import csv
import datetime
import logging
import os
import time
from django.db import connection, transaction
from psycopg2.extensions import adapt
from django_temporal.db.models.fields import DATE_CURRENT, TIME_CURRENT
# TODO
# merge in between two times
# documentation
def merge(new_csv, model, timestamp, keys, snapshot='full', copy_fields=None, callback=None, conn=None, valid_field='valid', debug=False):
"""
`new_csv` is a path to a CSV file, containing the records for the model.
    `timestamp` is the date when the given dataset was valid.
`keys` is a list of model fields, which together with valid_field form
a sequenced unique temporal index.
`snapshot` can be either "full" or "delta". If snapshot is "full", it is
assumed the missing records are no longer valid. If snapshot is "delta",
then the records given are updated.
`copy_fields` is a list of fields to be copied from existing records in
the table when updating the records.
`callback` is a function to be called before the end of transaction
as callback(model, timestamp, keys, snapshot, conn)
"""
import time
rdr = csv.reader(open(new_csv))
fields = rdr.next()
assert snapshot in ('full', 'delta')
new_csv = os.path.abspath(new_csv)
if conn is None:
# FIXME
conn = connection
fieldtypes = dict([(f.attname, f.db_type(conn)) for f in model._meta.fields])
valid_field_type = fieldtypes[valid_field]
if valid_field_type == 'daterange':
CURRENT_VALUE = DATE_CURRENT
elif valid_field_type == 'tstzrange':
CURRENT_VALUE = TIME_CURRENT
else:
raise ValueError("Unknown type of valid field")
with transaction.commit_on_success():
cur = conn.cursor()
orig_table = model._meta.db_table
tmptable = orig_table + '_temp'
tmptable_term = orig_table + '_term_temp'
qn = conn.ops.quote_name
#fielddef = ', '.join(['%s varchar(500)' % qn(i) for i in fields])
fielddef = ', '.join(['%s %s NULL' % (qn(i), fieldtypes[i]) for i in fields])
total_t1 = time.time()
if debug:
print 'STARTING STATE'
print '~'*80
import sys
sql = '''COPY ''' + qn(orig_table) + ''' TO stdout WITH CSV HEADER NULL '';'''
cur.copy_expert(sql, sys.stdout)
print '~'*80
sql = 'DROP TABLE IF EXISTS ' + qn(tmptable) + ';'
if debug:
print sql
cur.execute(sql)
sql = 'DROP TABLE IF EXISTS ' + qn(tmptable_term) + ';'
if debug:
print sql
cur.execute(sql)
# First we load the new dump into db as a table
# This table is `tmptable`
logging.debug('Creating table ' + tmptable)
sql = 'CREATE TABLE ' + qn(tmptable) + '(' + fielddef + ');'
if debug:
print sql
cur.execute(sql)
t1 = time.time()
logging.debug('Copying from ' + new_csv)
sql = '''COPY ''' + qn(tmptable) + ''' FROM %s WITH CSV HEADER NULL '';'''
if debug:
print sql % adapt(new_csv).getquoted()
sql = sql % 'stdin'
cur.copy_expert(sql, open(new_csv))
t2 = time.time()
logging.debug('COPY took %.2f seconds' % (t2-t1,))
sql = 'SELECT COUNT(*) FROM %s' % qn(tmptable) + ';'
if debug:
print sql
cur.execute(sql)
count = cur.fetchall()[0][0]
logging.debug('Number of records in input CSV: %d' % count)
logging.debug('Locking table ' + orig_table)
sql = 'LOCK TABLE ' + qn(orig_table) + ' IN ROW EXCLUSIVE MODE;'
if debug:
print sql
cur.execute(sql)
logging.debug('Deleting unchanged records...')
t1 = time.time()
if snapshot == 'full':
logging.debug('Creating index on temporary table')
sql = 'CREATE INDEX ' + qn(tmptable + '_keys_idx') + ' ON ' \
+ qn(tmptable) + '(' + ', '.join([qn(i) for i in keys]) + ');'
if debug:
print sql
cur.execute(sql)
logging.debug('Terminating validity for newly missing records')
# To find out which records have no counterpart in existing table,
# we first make a table containing (temporal) keys from both tables
# side by side.
sql = 'SELECT DISTINCT ' \
+ ', '.join(['%s.%s AS %s' % (qn(orig_table), qn(i), qn("orig_" + i)) for i in keys]) \
+ ', ' \
+ ', '.join(['%s.%s' % (qn(tmptable), qn(i)) for i in keys]) \
+ ' INTO ' + qn(tmptable_term) \
+ ' FROM ' + qn(orig_table) + ' LEFT OUTER JOIN ' + qn(tmptable) + ' ON ' \
+ ' AND '.join(['(%s.%s=%s.%s::%s OR (%s.%s IS NULL AND %s.%s IS NULL))' % (qn(orig_table), qn(i), qn(tmptable), qn(i), fieldtypes[i], qn(orig_table), qn(i), qn(tmptable), qn(i)) for i in keys]) \
+ ' AND (' + ' OR '.join(['%s.%s IS NOT NULL' % (qn(orig_table), qn(i)) for i in keys]) + ')' \
+ ' WHERE upper(' + qn(orig_table) + "." + qn(valid_field) + ") = %s ;"
params = [CURRENT_VALUE]
if debug:
print sql % tuple([adapt(i).getquoted() for i in params])
cur.execute(sql, params)
logging.debug('Creating index.')
sql = 'CREATE INDEX ' + qn(tmptable_term + '_idx') \
+ ' ON ' + qn(tmptable_term) \
+ '(' + ', '.join([qn("orig_" + i) for i in keys]) + ');'
if debug:
print sql
cur.execute(sql)
sql = 'ANALYZE ' + qn(tmptable_term) + ';'
if debug:
print sql
cur.execute(sql)
# Delete records which counterpart in new dump, to get those, which
# have gone missing and are to have their validity terminated.
sql = 'DELETE FROM ' + qn(tmptable_term) + " WHERE " \
+ '\n OR '.join(['%s.%s IS NOT NULL' % (qn(tmptable_term), qn(i)) for i in keys]) \
+ ';'
if debug:
print sql
cur.execute(sql)
sql = 'SELECT COUNT(*) FROM ' + qn(tmptable_term) + ';'
if debug:
print sql
cur.execute(sql)
data = cur.fetchall()
logging.debug('Deleted entries count: %d' % data[0][0])
logging.debug('Updating.')
# Terminate validity to records, which have gone missing.
sql = 'UPDATE ' + qn(orig_table) + ' SET ' \
+ qn(valid_field) + " = ('[' || lower(" + qn(valid_field) + ") || ',' || %s || ')')::" + fieldtypes[valid_field] \
+ ' FROM ' + qn(tmptable_term) \
+ ' WHERE upper(' + qn(valid_field) + ') = %s AND ' \
+ '\n AND '.join(['(%s.%s=%s.%s::%s OR (%s.%s IS NULL AND %s.%s IS NULL))' % (qn(orig_table), qn(i), qn(tmptable_term), qn('orig_' + i), fieldtypes[i], qn(orig_table), qn(i), qn(tmptable_term), qn('orig_' + i)) for i in keys]) \
+ ';'
params = [timestamp, CURRENT_VALUE]
if debug:
print sql % tuple([adapt(i).getquoted() for i in params])
cur.execute(sql, params)
t2 = time.time()
logging.debug('Terminating validity took %.2f seconds' % (t2-t1))
sql = 'DROP TABLE ' + qn(tmptable_term) + ';'
if debug:
print sql
cur.execute(sql)
t1 = time.time()
# Select keys from current temporal table, that have exact counterparts
# (including non-key fields) in new dump. We use this to see which
# records have not changed.
sql = 'SELECT ' \
+ ', '.join(['%s.%s' % (qn(orig_table), qn(i)) for i in keys]) \
+ ' INTO ' + qn(tmptable_term) \
+ ' FROM ' + qn(orig_table) \
+ ' JOIN ' + qn(tmptable) + ' ON ' \
+ ' AND '.join(['(%s.%s=%s.%s::%s OR (%s.%s IS NULL AND %s.%s IS NULL))' % (qn(orig_table), qn(i), qn(tmptable), qn(i), fieldtypes[i], qn(orig_table), qn(i), qn(tmptable), qn(i)) for i in fields]) \
+ ' ' \
+ ' WHERE upper(' + qn(valid_field) + ') = %s AND ' \
+ '\n AND '.join(['(%s.%s=%s.%s::%s OR (%s.%s IS NULL AND %s.%s IS NULL))' % (qn(orig_table), qn(i), qn(tmptable), qn(i), fieldtypes[i], qn(orig_table), qn(i), qn(tmptable), qn(i)) for i in fields]) \
+ ';'
params = [CURRENT_VALUE]
if debug:
print sql % tuple([adapt(i).getquoted() for i in params])
cur.execute(sql, params)
# Delete rows from new dump, which have not changed compared to temporal
# table.
sql = 'DELETE FROM ' + qn(tmptable) \
+ ' USING ' + qn(tmptable_term) \
+ ' WHERE ' \
+ '\n AND '.join(
['(%s.%s::%s=%s.%s::%s OR (%s.%s IS NULL AND %s.%s IS NULL))' % (
qn(tmptable_term), qn(i), fieldtypes[i], qn(tmptable), qn(i), fieldtypes[i], qn(tmptable_term), qn(i), qn(tmptable), qn(i)) for i in keys]
) \
+ ';'
if debug:
print sql
cur.execute(sql)
t2 = time.time()
logging.debug('Deleting took %.2f' % (t2-t1,))
sql = 'SELECT COUNT(*) FROM %s' % qn(tmptable) + ';'
if debug:
print sql
cur.execute(sql)
count = cur.fetchall()[0][0]
logging.debug('Number of changed or new records in temp table: %d' % count)
logging.debug('Adding changed items')
# First terminate validity to records in temporal table. New records
# will have same key, starting with current time.
sql = 'UPDATE ' + qn(orig_table) + " SET " + qn(valid_field) + " = ('[' || lower(" + qn(valid_field) + ") || ',' || %s || ')'):: " + fieldtypes[valid_field] \
+ ' FROM ' + qn(tmptable) \
+ " WHERE upper(" + qn(valid_field) + ") = %s AND " \
+ ' AND '.join(
['(%s.%s::%s=%s.%s::%s OR (%s.%s IS NULL AND %s.%s IS NULL))' % (
qn(orig_table), qn(i), fieldtypes[i], qn(tmptable), qn(i), fieldtypes[i], qn(orig_table), qn(i), qn(tmptable), qn(i)) for i in keys]
) \
+ ';'
params = [timestamp, CURRENT_VALUE]
if debug:
print sql % tuple([adapt(i).getquoted() for i in params])
cur.execute(sql, params)
print '~'*30
# Insert new records into temporal table, with current time as start of
# validity. This covers both updated and new records.
if copy_fields is None:
copy_fields = []
copy_field_spec = []
copy_fields_from = ''
else:
copy_field_spec = ['%s.%s::%s' % (qn(orig_table), qn(i), fieldtypes[i]) for i in copy_fields]
copy_fields_from = ' LEFT OUTER JOIN ' + qn(orig_table) + ' ON ' \
+ ' AND '.join(
['(%s.%s::%s=%s.%s::%s OR (%s.%s IS NULL AND %s.%s IS NULL))' % (
qn(orig_table), qn(i), fieldtypes[i], qn(tmptable), qn(i), fieldtypes[i], qn(orig_table), qn(i), qn(tmptable), qn(i)) for i in keys]
) \
+ ' AND upper(' + qn(orig_table) + '.' + qn(valid_field) + ') = %s'
sql = 'INSERT INTO ' + qn(orig_table) + '(' + ','.join([qn(i) for i in fields + copy_fields + [valid_field]]) + ') ' \
+ ' SELECT DISTINCT ' + \
', '.join(['%s.%s::%s' % (qn(tmptable), qn(i), fieldtypes[i]) for i in fields] + \
copy_field_spec + \
["('[' || %s || ',' || %s || ')')::" + fieldtypes[valid_field]]) \
+ ' FROM ' + qn(tmptable) + copy_fields_from + ';'
if copy_fields:
params = [timestamp, CURRENT_VALUE, timestamp]
else:
params = [timestamp, CURRENT_VALUE]
if debug:
print sql % tuple([adapt(i).getquoted() for i in params])
cur.execute(sql, params)
logging.debug('Dropping temporary table ' + tmptable)
cur.execute('DROP TABLE ' + qn(tmptable) + ';')
sql = 'DROP TABLE ' + qn(tmptable_term) + ';'
if debug:
print sql
cur.execute(sql)
sql = 'SAVEPOINT merge_complete;'
if debug:
print sql
cur.execute(sql)
if callback is not None and callable(callback):
logging.info('Calling callback.')
callback(model=model, timestamp=timestamp, keys=keys, snapshot=snapshot, conn=conn)
total_t2 = time.time()
logging.info('Total time: %.2f seconds.' % (total_t2-total_t1))
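# --- Illustrative usage (not part of django_temporal) ------------------------
# A hedged sketch of how merge() above might be called, based only on its
# docstring. The model, key field, CSV path and timestamp are all made up.
def _example_merge_call():
    from myapp.models import Price              # hypothetical temporal model
    merge(
        new_csv='/data/prices_2014-10-24.csv',  # full dump valid at `timestamp`
        model=Price,
        timestamp='2014-10-24',
        keys=['product_id'],        # together with `valid` forms the temporal key
        snapshot='full',            # rows missing from the dump get terminated
    )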
|
[
"[email protected]"
] | |
72d7699bddf94214ff38384c276b9e713e3248d0
|
44da94c0a05e079df944748a9331a4b8d45ae182
|
/Practice_100/p70.py
|
cabccc7f1fc7c59c15a518acb6009aafd1c12e5f
|
[] |
no_license
|
Tamalika1995/Udemy_Practice
|
7af1f3f07cda26614be20032e2e02d86a4e43856
|
3f2286584205ae847c8686584f6de78a5a6382af
|
refs/heads/master
| 2023-06-16T14:02:31.371663 | 2021-06-22T03:32:12 | 2021-06-22T03:32:12 | 377,487,991 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 206 |
py
|
'''Please write a program to output a random even number between 0 and 10 inclusive using
random module and list comprehension.'''
import random
l=random.choice([i for i in range(0,11) if i%2==0])
print(l)
|
[
"[email protected]"
] | |
f811204e6610601c7ff459d4b0fc1fd20076ac1d
|
1b8762f9402a7e30aad3ba31da5bf3d5d147a21e
|
/robotDescription/testURDF.py
|
7df1cf7b5aae72178a997052be69ddda80f8ccd6
|
[] |
no_license
|
MTlachac/nlp-waypoints
|
e033d05e8a04ae99dae773902363cc904f555c15
|
a2018992059b5b3d574a284ddcf3901d36f41865
|
refs/heads/master
| 2023-02-02T17:45:22.611285 | 2020-12-23T22:59:32 | 2020-12-23T22:59:32 | 308,916,098 | 2 | 1 | null | 2020-11-27T15:11:09 | 2020-10-31T15:48:09 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 838 |
py
|
import time
import pybullet as p
import pybullet_data
# joint ID to visualize (move and print info and state)
JOINT_ID = 3
client = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0,0,-10)
planeId = p.loadURDF("plane.urdf")
startPos = [0, 0, 2]
botId = p.loadURDF("hyq.urdf", startPos)
n = p.getNumJoints(botId)
print("Number of joints:", n)
print("Joint " + str(JOINT_ID) + ":")
print(p.getJointInfo(botId, JOINT_ID))
for i in range(10000):
p.stepSimulation()
time.sleep(1./240.)
p.setJointMotorControl2(bodyUniqueId = botId,
jointIndex = JOINT_ID,
controlMode = p.VELOCITY_CONTROL,
targetVelocity = 10,
force = 1000)
print(p.getJointState(botId, JOINT_ID))
p.disconnect()
|
[
"[email protected]"
] | |
8d255dc70184d1d04011cfdba72a559e6b28d7bd
|
33427f7eb333835deaf83732a9acda705aba16ac
|
/venv/lib/python3.6/site-packages/app/__main__.py
|
751464d3d16c5e94695a021f6e8c572e784119d0
|
[] |
no_license
|
tberal/geru
|
d12047be4c0e40984935e4030254702b23f22aff
|
a090e6ea0c8b7d006169e486aa9ee6a740003a39
|
refs/heads/master
| 2021-08-28T16:13:34.067106 | 2017-12-12T17:17:17 | 2017-12-12T17:17:17 | 114,020,353 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 827 |
py
|
from waitress import serve
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
session_factory = SignedCookieSessionFactory('gerutestkey')
def main(**settings):
config = Configurator(settings=settings)
config.set_session_factory(session_factory)
config.include('pyramid_chameleon')
config.add_route('random', '/quotes/random')
config.add_route('index', '/')
config.add_route('quotes', '/quotes')
config.add_route('quote', '/quotes/{quote_number}')
config.add_route('actions', '/actions')
config.add_route('session_actions', '/actions/{session_id}')
config.add_route('sessions', '/sessions')
config.scan('app.views')
return config.make_wsgi_app()
if __name__ == '__main__' :
app=main()
serve(app, host='0.0.0.0', port=5000)
|
[
"[email protected]"
] | |
704b8094862dc15eea3c0bba95ffb5a928bd1cdd
|
a7ecc51a9c3dd86690fe4e5358901af9f598af80
|
/code/generator/ContentGenerator.py
|
4e871f79ede16635896ab510fc9e8049d7990ef2
|
[] |
no_license
|
Maciejfsafew/PopularitySimulation
|
31387eae32a9535c63bc57f423ce2a25daa0340f
|
36a56262f49fd02777cca8f247a740795594db7d
|
refs/heads/master
| 2016-09-06T13:07:43.386601 | 2014-06-08T21:28:47 | 2014-06-08T21:28:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 32,723 |
py
|
# -*- coding: utf-8 -*-
import random
from code.datamodel.Category import Category
from code.datamodel.Content import Content
__author__ = 'mjjaniec'
class ContentGenerator(object):
def generate_content(self):
return random.choice(self.contents)
@staticmethod
def _generate_categories(hint):
interests = []
sum = 0.0
hint_space = 0.5 + random.random() / 2
rest_space = 1.0 - hint_space
for i in xrange(random.randint(0, 3)):
val = random.expovariate(1)
interests.append(val)
sum += val
interests = map(lambda x: rest_space * x / sum, interests)
result = {hint: hint_space}
if len(interests) > 0:
for val in interests:
category = None
while True:
category = random.choice(Category)
if not category in result:
break
result[category] = val
else:
result[hint] = 1.0
return result
def __init__(self):
self.contents = [
Content(""" Muse - Greatest Hits (2012) Full Album """, 0.591944787009,
ContentGenerator._generate_categories("music")),
Content(""" Muse - Madness """, 0.769186601583, ContentGenerator._generate_categories("music")),
Content(""" muse """, 0.188081359228, ContentGenerator._generate_categories("music")),
Content(""" Muse - The 2nd Law (Full Album) """, 0.548707612686,
ContentGenerator._generate_categories("music")),
Content(""" Muse - Uprising """, 0.776071250433, ContentGenerator._generate_categories("music")),
Content(""" Muse - Live at iTunes Festival 2012 (Full HD 1080p) """, 0.638099768796,
ContentGenerator._generate_categories("music")),
Content(""" Muse - Supremacy (Official Video) """, 0.690957924091,
ContentGenerator._generate_categories("music")),
Content(""" Muse - Resistance """, 0.751213854425, ContentGenerator._generate_categories("music")),
Content(""" Live At Rome Olympic Stadium FULL HD """, 0.553555234085,
ContentGenerator._generate_categories("music")),
Content(""" Muse - Hysteria """, 0.722403669408, ContentGenerator._generate_categories("music")),
Content(""" Muse - Feeling Good (Video) """, 0.726683721633,
ContentGenerator._generate_categories("music")),
Content(""" Muse - Knights Of Cydonia (Video) """, 0.685996824963,
ContentGenerator._generate_categories("music")),
Content(""" Muse - Undisclosed Desires """, 0.749687122185, ContentGenerator._generate_categories("music")),
Content(""" Muse - Austin City Limits 2013 (Full) (HD) """, 0.549372008224,
ContentGenerator._generate_categories("music")),
Content(""" Muse - Bliss """, 0.704095216618, ContentGenerator._generate_categories("music")),
Content(""" Muse - Follow Me """, 0.676271149085, ContentGenerator._generate_categories("music")),
Content(""" Muse - Starlight (Video) """, 0.747597819425, ContentGenerator._generate_categories("music")),
Content(""" Muse- Coachella Music & Arts Festival 2014 Weekend One [PROSHOT] """, 0.378504495833,
ContentGenerator._generate_categories("music")),
Content(""" Muse- The 2nd Law (Full Album) """, 0.421653535742,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - Nothing Else Matters (official video clip) """, 0.758775203589,
ContentGenerator._generate_categories("music")),
Content(""" Metallica Playlist """, 0.600174322912, ContentGenerator._generate_categories("music")),
Content(""" Metallica-The Black Album-[Full Album] """, 0.588620693101,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - Nimes 2009 [Full Concert] HD.mp4 """, 0.619802332445,
ContentGenerator._generate_categories("music")),
Content(""" Metallica S&M 1999 Full Concert """, 0.576802284671,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - Enter Sandman [Official Music Video] """, 0.737146837426,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - Greatest Hits [Full Album] Vol..1 """, 0.641972632097,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - The Day That Never Comes [Official Music Video] """, 0.690831416278,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - Master Of Puppets (Live) """, 0.67526647853,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - One """, 0.786731621272, ContentGenerator._generate_categories("music")),
Content(""" Metallica - Quebec Magnetic+Bonus - Full HD """, 0.458513347533,
ContentGenerator._generate_categories("music")),
Content(""" Klasyczne albumy rocka - Metallica - The Black Album """, 0.482936810799,
ContentGenerator._generate_categories("music")),
Content(""" Metallica playlist """, 0.279098847509, ContentGenerator._generate_categories("music")),
Content(""" Metallica - Freeze 'Em All: Live in Antarctica (FULL CONCERT) [HD] """, 0.598818514227,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - Fade To Black - Tłumaczenie """, 0.587370821249,
ContentGenerator._generate_categories("music")),
Content(""" Metallica Through The Never - zwiastun PL """, 0.54652817026,
ContentGenerator._generate_categories("music")),
Content(""" Metallica - The Unforgiven """, 0.743345718935, ContentGenerator._generate_categories("music")),
Content(""" Metallica - ...And Justice for All (live Seattle, 1989) """, 0.572341544252,
ContentGenerator._generate_categories("music")),
Content(""" Metallica: Am I Evil? (Live w/ The Big 4) [The Big 4: Live in Sofia, Bulgaria] """,
0.478356772838, ContentGenerator._generate_categories("music")),
Content(""" Holy - Jesus Culture (Lyrics/Subtitles) (Best Worship Song for Jesus) """, 0.576441601196,
ContentGenerator._generate_categories("religion")),
Content(""" Life Of Jesus Christ New Full Movie │ Documentary 2013 │ """, 0.555925888793,
ContentGenerator._generate_categories("religion")),
Content(""" Best of Just For Laughs Gags - Best Jesus Pranks """, 0.663565089791,
ContentGenerator._generate_categories("religion")),
Content(""" Jesus Teaches a Samaritan Woman """, 0.539002849859,
ContentGenerator._generate_categories("religion")),
Content(""" Satanist Discovers That Jesus Christ Always Renders Satan Powerless! """, 0.561134725222,
ContentGenerator._generate_categories("religion")),
Content(""" The Life Of Jesus Christ - LDS - Full Movie - Best Quality... """, 0.551867183644,
ContentGenerator._generate_categories("religion")),
Content(""" Archaeological Evidence for the Biblical tale of the Execution of Jesus [FULL DOCUMENTARY] """,
0.347304880509, ContentGenerator._generate_categories("religion")),
Content(""" The Jesus Movie 1979 Full """, 0.643777449591,
ContentGenerator._generate_categories("religion")),
Content(""" Jews for Jesus """, 0.263245729218, ContentGenerator._generate_categories("religion")),
Content(""" Jesus is not God? Zakir Naik answered on Jesus or Muhammad by David Wood and Sam Shamoun """,
0.406937205431, ContentGenerator._generate_categories("religion")),
Content(""" Encounters With Jesus - Healing - Heaven - Hell - Angels - Miracles - Sid Roth """,
0.443678262499, ContentGenerator._generate_categories("religion")),
Content(""" Jesus Will Survive - Jesus Christ! The Musical """, 0.701103398919,
ContentGenerator._generate_categories("religion")),
Content(""" BBC - The Story of Jesus Part 2 """, 0.374004672405,
ContentGenerator._generate_categories("religion")),
Content(""" Jesús, el maestro de Nazaret: Primera Parte """, 0.467829070172,
ContentGenerator._generate_categories("religion")),
Content(""" Man Dies And Meets Jesus Christ In Heaven ! ( Near Death Experience ) """, 0.538387996697,
ContentGenerator._generate_categories("religion")),
Content(""" 2014-2015 Blood Moons Jesus coming Perry Stone """, 0.500970154141,
ContentGenerator._generate_categories("religion")),
Content(""" Jesus Gassed By Nazis In Stomach-Churning Ad [VIDEO] """, 0.48071221845,
ContentGenerator._generate_categories("religion")),
Content(""" JESUS (English) """, 0.594017020625, ContentGenerator._generate_categories("religion")),
Content(""" The Hidden Story of Jesus - Documentary """, 0.533818148619,
ContentGenerator._generate_categories("religion")),
Content(""" 1 hora de música con Jesús Adrián Romero — Adoración Vol.1 [AudioHD] """, 0.714834752441,
ContentGenerator._generate_categories("religion")),
Content(""" TOP 100 Goals in Football History ᴴᴰ """, 0.680602312464,
ContentGenerator._generate_categories("sport")),
Content(""" Ultimate Best Football Tricks & Skills """, 0.620764863721,
ContentGenerator._generate_categories("sport")),
Content(""" Comedy Football 2011 - (part 1-2) """, 0.707148043055,
ContentGenerator._generate_categories("sport")),
Content(""" Nike Football: Winner Stays. ft. Ronaldo, Neymar Jr., Rooney, Ibrahimović, Iniesta & more """,
0.734213247517, ContentGenerator._generate_categories("sport")),
Content(""" K-State Football | Kaiden's Play """, 0.437318794991,
ContentGenerator._generate_categories("sport")),
Content(""" Vanier Football Challenge """, 0.404202392725, ContentGenerator._generate_categories("sport")),
Content(""" ESPN Fan Hall of Famer Canaan Sandy Scores at Spring Football Game """, 0.364748077317,
ContentGenerator._generate_categories("sport")),
Content(""" AMERICAN FOOTBALL TRAINING WITH WEIGHTS """, 0.387203966797,
ContentGenerator._generate_categories("sport")),
Content(""" Insane Football Skills Show 2014 | ep.3 """, 0.373647618203,
ContentGenerator._generate_categories("sport")),
Content(
""" Nike Football Commercial: Winner Stays. ft. Ronaldo, Neymar Jr., Rooney, Ibrahimović, Iniesta """,
0.4030680664, ContentGenerator._generate_categories("sport")),
Content(""" This Is Football 2012/13 1080p - Best Moments """, 0.58330169954,
ContentGenerator._generate_categories("sport")),
Content(""" Nike Football: Flyknit Mercurial Superfly IV """, 0.54448641829,
ContentGenerator._generate_categories("sport")),
Content(""" Nike Football Ronaldo Neymar Commercial REVIEW """, 0.426904570966,
ContentGenerator._generate_categories("sport")),
Content(""" Best Funny Football Moments 2013 | HD """, 0.536954423803,
ContentGenerator._generate_categories("sport")),
Content(""" The BEST Street Football/Futsal/Freestyle Skills EVER!! ★ HD """, 0.650776576648,
ContentGenerator._generate_categories("sport")),
Content(""" Top 15 College Football Plays of 2013-14 (HD) """, 0.5565379047,
ContentGenerator._generate_categories("sport")),
Content(
""" Nike Football: Winner Stays. ft. Ronaldo, Neymar Jr., Rooney, Ibrahimović, Iniesta & more (2014) """,
0.369381485389, ContentGenerator._generate_categories("sport")),
Content(""" Funny Football moments 2013 - 2014 HD """, 0.531360572168,
ContentGenerator._generate_categories("sport")),
Content(""" THE Football Free Kick Battle 2012 | freekickerz vs. Joltter | Vol.2 """, 0.659042115204,
ContentGenerator._generate_categories("sport")),
Content(""" Football's fight club - Part One - UK documentary """, 0.58110666912,
ContentGenerator._generate_categories("sport")),
Content(""" Makeup Collection & Storage! Part 1 ♥ 화장품 컬랙션 & 보관대 """, 0.389987519602,
ContentGenerator._generate_categories("fashion")),
Content(""" Makeup Collection & Storage! Part 2 ♥ 화장품 컬랙션 & 보관대 """, 0.377063112778,
ContentGenerator._generate_categories("fashion")),
Content(""" Arabian Look (with subs) - Linda Hallberg Makeup Tutorials """, 0.398578561777,
ContentGenerator._generate_categories("fashion")),
Content(""" panacea81 """, 0.268124123738, ContentGenerator._generate_categories("fashion")),
Content(""" Easy Fresh Face Makeup Tutorial """, 0.523517228042,
ContentGenerator._generate_categories("fashion")),
Content(""" My Milan Makeup Bag | ViviannaDoesMakeup """, 0.387783189149,
ContentGenerator._generate_categories("fashion")),
Content(""" Prom Makeup Tutorial """, 0.667283756112, ContentGenerator._generate_categories("fashion")),
Content(""" Spring Makeup Favourites | Hello October """, 0.383708315082,
ContentGenerator._generate_categories("fashion")),
Content(""" makeup Tutorial Scarlett Johansson inspired """, 0.466101717011,
ContentGenerator._generate_categories("fashion")),
Content(""" Red Lips Makeup tutorial """, 0.434527568462, ContentGenerator._generate_categories("fashion")),
Content(""" Picture Perfect for Prom: Colored Glamour ∙ Makeup Tutorial """, 0.470836990366,
ContentGenerator._generate_categories("fashion")),
Content(""" Beyoncé Inspired MakeUp Tutorial / Evening MakeUp / Photo Shoot MakeUp """, 0.533020672383,
ContentGenerator._generate_categories("fashion")),
Content(""" The Perfect Spring Look: Makeup, Hair, & Outfit! """, 0.418258582546,
ContentGenerator._generate_categories("fashion")),
Content(""" Prom Makeup Tutorial & Hair Tutorial | TheMakeupChair """, 0.467110801781,
ContentGenerator._generate_categories("fashion")),
Content(""" Kim Kardashian ♡ Hair & Makeup | Glamorous Warm Neutral Eyes & Red Lip! """, 0.520333754819,
ContentGenerator._generate_categories("fashion")),
Content(""" Big Sephora Haul ♥ Makeup, Skincare, and More! """, 0.513818897709,
ContentGenerator._generate_categories("fashion")),
Content(""" Not My Arms Makeup Challenge w/ my Husband "Night on the Town Look" """, 0.485915631744,
ContentGenerator._generate_categories("fashion")),
Content(""" Ep 6: Yhomira | Giving Back Glam with ||Superwoman|| // I love makeup. """, 0.402950552543,
ContentGenerator._generate_categories("fashion")),
Content(""" Kim Kardashian Inspired Makeup Tutorial New """, 0.451658825281,
ContentGenerator._generate_categories("fashion")),
Content(""" Pony's Beauty Diary - Play 101 Pencil Makeup (청초/코랄/레트로 메이크업) """, 0.504802206765,
ContentGenerator._generate_categories("fashion")),
Content(""" MinutePhysics """, 0.210037054512, ContentGenerator._generate_categories("history")),
Content(""" 100 Greatest Discoveries - Physics """, 0.513480163423,
ContentGenerator._generate_categories("history")),
Content(""" For the Love of Physics (May 16, 2011) """, 0.599351514588,
ContentGenerator._generate_categories("history")),
Content(""" Introduction to Physics: Educational Film """, 0.45562421779,
ContentGenerator._generate_categories("history")),
Content(""" Quantum Physics & Microscopic Universe [Full Documentary] """, 0.364904263409,
ContentGenerator._generate_categories("history")),
Content(""" A Crash Course In Particle Physics (1 of 2) """, 0.5425760764,
ContentGenerator._generate_categories("history")),
Content(""" Allan Adams: The discovery that could rewrite physics """, 0.495264232648,
ContentGenerator._generate_categories("history")),
Content(""" Common Physics Misconceptions """, 0.638863911625,
ContentGenerator._generate_categories("history")),
Content(""" COLD HARD SCIENCE. The Controversial Physics of Curling - Smarter Every Day 111 """,
0.555869417379, ContentGenerator._generate_categories("history")),
Content(
""" iTTV SPM Form 4 Physics Chapter 2 Force And Motion (Linear Motion) -Tuition/Lesson/Exam/Tips """,
0.459658610069, ContentGenerator._generate_categories("history")),
Content(""" Fay Dowker - Spacetime Atoms and the Unity of Physics (Perimeter Public Lecture) """,
0.486148661792, ContentGenerator._generate_categories("history")),
Content(""" 0rbitalis - 2D Gravity Physics Puzzle Game on Steam Early Access """, 0.420115122766,
ContentGenerator._generate_categories("history")),
Content(""" Introduction to Particle Physics Part 1/4 """, 0.444496552335,
ContentGenerator._generate_categories("history")),
Content(""" Physics. """, 0.264345267649, ContentGenerator._generate_categories("history")),
Content(""" AN AWESOME FUTURE! Michio Kaku - Physics of the Future """, 0.361384182188,
ContentGenerator._generate_categories("history")),
Content(""" COLD HARD SCIENCE.The Physics of Skating on Ice (With SlowMo) - Smarter Every Day 110 """,
0.597877332828, ContentGenerator._generate_categories("history")),
Content(""" Particles, Fields and The Future of Physics - A Lecture by Sean Carroll """, 0.480187685376,
ContentGenerator._generate_categories("history")),
Content(""" Quantum Physics Debunks Materialism """, 0.48696892485,
ContentGenerator._generate_categories("history")),
Content(""" Stanford Professor Andrei Linde celebrates physics breakthrough """, 0.645255321642,
ContentGenerator._generate_categories("history")),
Content(
""" Michio Kaku ★ Quantum Physics God And Science Multiverse Theory Quantum Mechanics - Universe """,
0.396004245573, ContentGenerator._generate_categories("history")),
Content(""" FilmWebTV """, 0.285733249643, ContentGenerator._generate_categories("movies")),
Content(""" Filmweb rozmawia z Januszem Gajosem """, 0.452972543061,
ContentGenerator._generate_categories("movies")),
Content(""" Filmweb rozmawia z Wojciechem Smarzowskim """, 0.31535099893,
ContentGenerator._generate_categories("movies")),
Content(""" Hobbit Pustkowie Smauga zwiastun Filmweb """, 0.258658730467,
ContentGenerator._generate_categories("movies")),
Content(""" Filmweb na premierze "Miłości" """, 0.335926616461,
ContentGenerator._generate_categories("movies")),
Content(""" Filmweb na premierze "W ukryciu" """, 0.316554107672,
ContentGenerator._generate_categories("movies")),
Content(""" Aplikacja Filmweb na Samsung Smart TV """, 0.340329214516,
ContentGenerator._generate_categories("movies")),
Content(""" Filmweb rozmawia z twórcami "Czekając na sobotę" """, 0.560513366538,
ContentGenerator._generate_categories("movies")),
Content(""" Bezstronnie Polecamy Extra: Filmweb """, 0.328103336725,
ContentGenerator._generate_categories("movies")),
Content(""" XBMC - Ustawienie scrapera FilmWeb """, 0.349234125325,
ContentGenerator._generate_categories("movies")),
Content(""" Filmweb rozmawia z Billem Nighym """, 0.268214507637,
ContentGenerator._generate_categories("movies")),
Content(""" "Filmweb - 10 to za mało" - film 2 """, 0.350392680419,
ContentGenerator._generate_categories("movies")),
Content(""" Filmweb Offline 2009 """, 0.460370718267, ContentGenerator._generate_categories("movies")),
Content(""" Filmweb - recenzja aplikacji na Androida i IOS """, 0.267760695272,
ContentGenerator._generate_categories("movies")),
Content(""" Percy Jackson Morze potworów (polski dubbing) Filmweb """, 0.404151111296,
ContentGenerator._generate_categories("movies")),
Content(""" Filmweb Offline 2011 """, 0.342226145081, ContentGenerator._generate_categories("movies")),
Content(""" Cleanskin 2012 Asa | Cleanskin Filmweb | Fresh Clean Skin """, 0.151851393988,
ContentGenerator._generate_categories("movies")),
Content(""" MooViDb - Filmweb API w akcji """, 0.238381536598,
ContentGenerator._generate_categories("movies")),
Content(""" Filmweb.pl od kuchni cz.2 """, 0.244870631991, ContentGenerator._generate_categories("movies")),
Content(""" Filmweb rozmawia z gwiazdą "Życia Pi" - Suraj Sharmą """, 0.353781909507,
ContentGenerator._generate_categories("movies")),
Content(""" Documentary World War 2 II in Colour The Second World War color """, 0.617248908911,
ContentGenerator._generate_categories("history")),
Content(""" Secret Stories Of World War 2 - National Geographic Documentary """, 0.604931572794,
ContentGenerator._generate_categories("history")),
Content(""" World War Two: Germany invades Russia 1941 """, 0.569919230621,
ContentGenerator._generate_categories("history")),
Content(""" World War II Blitzkrieg """, 0.405480450022, ContentGenerator._generate_categories("history")),
Content(""" Tankies: Tank Heroes of World War II, Episode 1 of 2 """, 0.589296899229,
ContentGenerator._generate_categories("history")),
Content(""" World War 2 1945 Documentory - Hitler in Colour - Real Footage - by roothmens """,
0.437845241148, ContentGenerator._generate_categories("history")),
Content(""" Inside World War II """, 0.564815892025, ContentGenerator._generate_categories("history")),
Content(""" World War Two: Soviet Union 1943 """, 0.582319178072,
ContentGenerator._generate_categories("history")),
Content(""" World War II: Crash Course World History #38 """, 0.623314008943,
ContentGenerator._generate_categories("history")),
Content(
""" WW2 - Japanese Invasion of China | The Second Sino-Japanese War: 1937-45 | SHOCKING WWII Documentary """,
0.534186366056, ContentGenerator._generate_categories("history")),
Content(""" World War II in Europe: Every Day """, 0.633461396822,
ContentGenerator._generate_categories("history")),
Content(""" World War Two: The End 1945 """, 0.558065816169,
ContentGenerator._generate_categories("history")),
Content(""" World War Two Documentary - 132 Minutes """, 0.566512770004,
ContentGenerator._generate_categories("history")),
Content(""" Victor Davis Hanson - World War II Leadership """, 0.422659990521,
ContentGenerator._generate_categories("history")),
Content(""" World War II in Europe and the Pacific: Every Day """, 0.532386442584,
ContentGenerator._generate_categories("history")),
Content(""" Brutal Combat in Second World War (graphic) """, 0.589658242963,
ContentGenerator._generate_categories("history")),
Content(""" World History - Causes of World War II """, 0.446622979171,
ContentGenerator._generate_categories("history")),
Content(""" Second World War 'The True Story' untold """, 0.318469143082,
ContentGenerator._generate_categories("history")),
Content(""" Antony Beevor, Author, "The Second World War" """, 0.387349498226,
ContentGenerator._generate_categories("history")),
Content(""" Historical maps 1 - World War II """, 0.524741101875,
ContentGenerator._generate_categories("history")),
Content(""" How to Save Money by Eliminating 10 Things You Don't Need """, 0.556186073475,
ContentGenerator._generate_categories("money")),
Content(""" 10 TIPS ON HOW TO SAVE MONEY $$ """, 0.482994669594,
ContentGenerator._generate_categories("money")),
Content(""" Suze Orman: To really save money, do this... """, 0.512936114735,
ContentGenerator._generate_categories("money")),
Content(""" The Absolute Best Ways To Save Money """, 0.467137651518,
ContentGenerator._generate_categories("money")),
Content(""" Saving Money: How to Make It Feel Good """, 0.393731724776,
ContentGenerator._generate_categories("money")),
Content(""" How to Start Saving Money """, 0.30817072701, ContentGenerator._generate_categories("money")),
Content(""" Saving Money - How To Save Money (Money Management) """, 0.375807882225,
ContentGenerator._generate_categories("money")),
Content(""" Tips for Saving Money on Water Heating """, 0.430734625537,
ContentGenerator._generate_categories("money")),
Content(""" Frugal Living, practical tips for saving money - Lynnae McCoy - theDove.us """, 0.47424973222,
ContentGenerator._generate_categories("money")),
Content(""" Super savers share secrets to saving money and building bank """, 0.44126956916,
ContentGenerator._generate_categories("money")),
Content(""" How To Save A Lot Of Money Every Year By Saving Change & Dollar Bills """, 0.434163233578,
ContentGenerator._generate_categories("money")),
Content(""" Smart ways to save money """, 0.40652808711, ContentGenerator._generate_categories("money")),
Content(""" MONEY SAVING TIPS!!! """, 0.389376176206, ContentGenerator._generate_categories("money")),
Content(""" Keith Chen: Could your language affect your ability to save money? """, 0.503376140879,
ContentGenerator._generate_categories("money")),
Content(""" Saving Money for College """, 0.474722530868, ContentGenerator._generate_categories("money")),
Content(""" Saving Money """, 0.296988164375, ContentGenerator._generate_categories("money")),
Content(""" 10 Easy Ways To Save Money Every Month """, 0.501684538275,
ContentGenerator._generate_categories("money")),
Content(""" Personal Finance-Saving Money, Investing, and Retirement Planning Part 1 """, 0.388195497134,
ContentGenerator._generate_categories("money")),
Content(""" (Cheap Comcast Service Trick) Saving Money On Your Comcast Internet and Cable Bill """,
0.464520605086, ContentGenerator._generate_categories("money")),
Content(""" DIS Unplugged - Saving Money On Your DCL Vacation - 04/01/14 """, 0.342012084809,
ContentGenerator._generate_categories("money")),
Content(""" ja putin pl """, 0.537773232098, ContentGenerator._generate_categories("politics")),
Content(""" Rzeźnik Putin: Morderstwa i Zamachy FSB / Rebellion: The Litvinenko Case [Lektor PL] """,
0.556638327566, ContentGenerator._generate_categories("politics")),
Content(
""" UFO Sightings Edward Snowden Leaks Information To President Putin? Incredible UFO Videos 2014 """,
0.351600623039, ContentGenerator._generate_categories("politics")),
Content(""" Putin: Internet began as CIA project """, 0.438390508698,
ContentGenerator._generate_categories("politics")),
Content(""" The Trap! Obama and Putin Would Save One Another, But Would Watch Us All Die! """,
0.407158747051, ContentGenerator._generate_categories("politics")),
Content(""" Putin - Największa Zaraza tego Świata """, 0.468369529283,
ContentGenerator._generate_categories("politics")),
Content(""" Putin Hangs Phone Up In Obama's Face """, 0.320844135644,
ContentGenerator._generate_categories("politics")),
Content(""" Obama: I'd Absolutely Save Putin From Drowning """, 0.366294661433,
ContentGenerator._generate_categories("politics")),
Content(""" BREAKING! Putin Halts All Talks With White House; WAR DRUMS LOUD ENOUGH YET """, 0.323248786635,
ContentGenerator._generate_categories("politics")),
Content(""" Obama: Putin Views World Through Cold War Prism """, 0.344591541395,
ContentGenerator._generate_categories("politics")),
Content(""" Putin on the future Russia Ukraine relationships """, 0.388184096832,
ContentGenerator._generate_categories("politics")),
Content(""" PUTIN ORDERS Air defence exercises in Russia warning to obama """, 0.374601110775,
ContentGenerator._generate_categories("politics")),
Content(""" Putin & Obama Go On "Dr. Phil" Show """, 0.546491111811,
ContentGenerator._generate_categories("politics")),
Content(""" Czy Putin zatrzyma sie na Krymie - Max Kolonko Mówi Jak Jest """, 0.554965666561,
ContentGenerator._generate_categories("politics")),
Content(""" Putin in der Zwickmühle? Dirk Müller Tagesausblick 25.04.2014 - die Bananenrepublik """,
0.421719431801, ContentGenerator._generate_categories("politics")),
Content(""" Putin warns of "consequences" if Ukraine military continues crackdown """, 0.411055604276,
ContentGenerator._generate_categories("politics")),
Content(""" RUSSIA-PUTIN / UKRAINE RUMOUR OF WORLD WAR 3 | See DESCRIPTION """, 0.309447112864,
ContentGenerator._generate_categories("politics")),
Content(""" Putin: Jeszcze Polska nie zginęła 20.12.2012 """, 0.438962672393,
ContentGenerator._generate_categories("politics")),
Content(""" Putin: How was the decision about Crimea made """, 0.408834875229,
ContentGenerator._generate_categories("politics")),
Content(""" Obama: I would absolutely save Putin from drowning """, 0.454046712558,
ContentGenerator._generate_categories("politics")),
]
|
[
"[email protected]"
] | |
f158340c9f8150c06fa4b5b77e47089dcdbcd870
|
0e3c059a4e21a8a5066dba04117cb87cac61f389
|
/2020_3/projeto2/antlr4-python3-runtime-4.7.2/src/antlr4/Lexer.py
|
a60b5bcc93ab380142e3598060a5796361e1ada9
|
[
"MIT"
] |
permissive
|
damorim/compilers-cin
|
fdc8e23d052cb8e1eb2db0f1edc43b6c62f55ea1
|
e2f3a18e4cded92276b9def254452910f28faa50
|
refs/heads/master
| 2021-07-08T09:19:24.062071 | 2021-05-01T20:56:23 | 2021-05-01T20:56:23 | 34,734,087 | 26 | 39 |
MIT
| 2020-10-02T04:11:43 | 2015-04-28T14:04:29 |
Python
|
UTF-8
|
Python
| false | false | 11,229 |
py
|
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A lexer is a recognizer that draws input symbols from a character stream.
# Lexer grammars result in a subclass of this object. A Lexer object
# uses simplified match() and error recovery mechanisms in the interest
# of speed.
#/
from io import StringIO
from typing.io import TextIO
import sys
from antlr4.CommonTokenFactory import CommonTokenFactory
from antlr4.atn.LexerATNSimulator import LexerATNSimulator
from antlr4.InputStream import InputStream
from antlr4.Recognizer import Recognizer
from antlr4.Token import Token
from antlr4.error.Errors import IllegalStateException, LexerNoViableAltException, RecognitionException
class TokenSource(object):
pass
class Lexer(Recognizer, TokenSource):
DEFAULT_MODE = 0
MORE = -2
SKIP = -3
DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL
HIDDEN = Token.HIDDEN_CHANNEL
MIN_CHAR_VALUE = 0x0000
MAX_CHAR_VALUE = 0x10FFFF
def __init__(self, input:InputStream, output:TextIO = sys.stdout):
super().__init__()
self._input = input
self._output = output
self._factory = CommonTokenFactory.DEFAULT
self._tokenFactorySourcePair = (self, input)
self._interp = None # child classes must populate this
# The goal of all lexer rules/methods is to create a token object.
# self is an instance variable as multiple rules may collaborate to
# create a single token. nextToken will return self object after
# matching lexer rule(s). If you subclass to allow multiple token
# emissions, then set self to the last token to be matched or
# something nonnull so that the auto token emit mechanism will not
# emit another token.
self._token = None
# What character index in the stream did the current token start at?
# Needed, for example, to get the text for current token. Set at
# the start of nextToken.
self._tokenStartCharIndex = -1
# The line on which the first character of the token resides#/
self._tokenStartLine = -1
# The character position of first character within the line#/
self._tokenStartColumn = -1
# Once we see EOF on char stream, next token will be EOF.
# If you have DONE : EOF ; then you see DONE EOF.
self._hitEOF = False
# The channel number for the current token#/
self._channel = Token.DEFAULT_CHANNEL
# The token type for the current token#/
self._type = Token.INVALID_TYPE
self._modeStack = []
self._mode = self.DEFAULT_MODE
# You can set the text for the current token to override what is in
# the input char buffer. Use setText() or can set self instance var.
#/
self._text = None
def reset(self):
# wack Lexer state variables
if self._input is not None:
self._input.seek(0) # rewind the input
self._token = None
self._type = Token.INVALID_TYPE
self._channel = Token.DEFAULT_CHANNEL
self._tokenStartCharIndex = -1
self._tokenStartColumn = -1
self._tokenStartLine = -1
self._text = None
self._hitEOF = False
self._mode = Lexer.DEFAULT_MODE
self._modeStack = []
self._interp.reset()
# Return a token from self source; i.e., match a token on the char
# stream.
def nextToken(self):
if self._input is None:
raise IllegalStateException("nextToken requires a non-null input stream.")
# Mark start location in char stream so unbuffered streams are
# guaranteed at least have text of current token
tokenStartMarker = self._input.mark()
try:
while True:
if self._hitEOF:
self.emitEOF()
return self._token
self._token = None
self._channel = Token.DEFAULT_CHANNEL
self._tokenStartCharIndex = self._input.index
self._tokenStartColumn = self._interp.column
self._tokenStartLine = self._interp.line
self._text = None
continueOuter = False
while True:
self._type = Token.INVALID_TYPE
ttype = self.SKIP
try:
ttype = self._interp.match(self._input, self._mode)
except LexerNoViableAltException as e:
self.notifyListeners(e) # report error
self.recover(e)
if self._input.LA(1)==Token.EOF:
self._hitEOF = True
if self._type == Token.INVALID_TYPE:
self._type = ttype
if self._type == self.SKIP:
continueOuter = True
break
if self._type!=self.MORE:
break
if continueOuter:
continue
if self._token is None:
self.emit()
return self._token
finally:
# make sure we release marker after match or
# unbuffered char stream will keep buffering
self._input.release(tokenStartMarker)
# Instruct the lexer to skip creating a token for current lexer rule
# and look for another token. nextToken() knows to keep looking when
# a lexer rule finishes with token set to SKIP_TOKEN. Recall that
# if token==null at end of any token rule, it creates one for you
# and emits it.
#/
def skip(self):
self._type = self.SKIP
def more(self):
self._type = self.MORE
def mode(self, m:int):
self._mode = m
def pushMode(self, m:int):
if self._interp.debug:
print("pushMode " + str(m), file=self._output)
self._modeStack.append(self._mode)
self.mode(m)
def popMode(self):
if len(self._modeStack)==0:
raise Exception("Empty Stack")
if self._interp.debug:
print("popMode back to "+ self._modeStack[:-1], file=self._output)
self.mode( self._modeStack.pop() )
return self._mode
# Set the char stream and reset the lexer#/
@property
def inputStream(self):
return self._input
@inputStream.setter
def inputStream(self, input:InputStream):
self._input = None
self._tokenFactorySourcePair = (self, self._input)
self.reset()
self._input = input
self._tokenFactorySourcePair = (self, self._input)
@property
def sourceName(self):
return self._input.sourceName
# By default does not support multiple emits per nextToken invocation
# for efficiency reasons. Subclass and override self method, nextToken,
# and getToken (to push tokens into a list and pull from that list
# rather than a single variable as self implementation does).
#/
def emitToken(self, token:Token):
self._token = token
# The standard method called to automatically emit a token at the
# outermost lexical rule. The token object should point into the
# char buffer start..stop. If there is a text override in 'text',
# use that to set the token's text. Override self method to emit
# custom Token objects or provide a new factory.
#/
def emit(self):
t = self._factory.create(self._tokenFactorySourcePair, self._type, self._text, self._channel, self._tokenStartCharIndex,
self.getCharIndex()-1, self._tokenStartLine, self._tokenStartColumn)
self.emitToken(t)
return t
def emitEOF(self):
cpos = self.column
lpos = self.line
eof = self._factory.create(self._tokenFactorySourcePair, Token.EOF, None, Token.DEFAULT_CHANNEL, self._input.index,
self._input.index-1, lpos, cpos)
self.emitToken(eof)
return eof
@property
def type(self):
return self._type
@type.setter
def type(self, type:int):
self._type = type
@property
def line(self):
return self._interp.line
@line.setter
def line(self, line:int):
self._interp.line = line
@property
def column(self):
return self._interp.column
@column.setter
def column(self, column:int):
self._interp.column = column
# What is the index of the current character of lookahead?#/
def getCharIndex(self):
return self._input.index
# Return the text matched so far for the current token or any
# text override.
@property
def text(self):
if self._text is not None:
return self._text
else:
return self._interp.getText(self._input)
# Set the complete text of self token; it wipes any previous
# changes to the text.
@text.setter
def text(self, txt:str):
self._text = txt
# Return a list of all Token objects in input char stream.
# Forces load of all tokens. Does not include EOF token.
#/
def getAllTokens(self):
tokens = []
t = self.nextToken()
while t.type!=Token.EOF:
tokens.append(t)
t = self.nextToken()
return tokens
def notifyListeners(self, e:LexerNoViableAltException):
start = self._tokenStartCharIndex
stop = self._input.index
text = self._input.getText(start, stop)
msg = "token recognition error at: '" + self.getErrorDisplay(text) + "'"
listener = self.getErrorListenerDispatch()
listener.syntaxError(self, None, self._tokenStartLine, self._tokenStartColumn, msg, e)
def getErrorDisplay(self, s:str):
with StringIO() as buf:
for c in s:
buf.write(self.getErrorDisplayForChar(c))
return buf.getvalue()
def getErrorDisplayForChar(self, c:str):
if ord(c[0])==Token.EOF:
return "<EOF>"
elif c=='\n':
return "\\n"
elif c=='\t':
return "\\t"
elif c=='\r':
return "\\r"
else:
return c
def getCharErrorDisplay(self, c:str):
return "'" + self.getErrorDisplayForChar(c) + "'"
# Lexers can normally match any char in it's vocabulary after matching
# a token, so do the easy thing and just kill a character and hope
# it all works out. You can instead use the rule invocation stack
# to do sophisticated error recovery if you are in a fragment rule.
#/
def recover(self, re:RecognitionException):
if self._input.LA(1) != Token.EOF:
if isinstance(re, LexerNoViableAltException):
# skip a char and try again
self._interp.consume(self._input)
else:
# TODO: Do we lose character or line position information?
self._input.consume()
|
[
"[email protected]"
] | |
c4c440d4f7e22c09561d6861a4dd80339e4b7534
|
23c1d920580966cee3a617b81475994a52dbca3e
|
/dictalchemy/tests/test_utils.py
|
767437680427a1574b18caac7f8888ab4043ed60
|
[
"MIT"
] |
permissive
|
iuantu/dictalchemy
|
cf98eb71b83f1ffb0cba11fb313b3761ede8ee5b
|
fa12c1cae17d35f57c20d68df81f3a694677050d
|
refs/heads/master
| 2020-11-29T10:49:45.083463 | 2019-12-25T11:57:36 | 2019-12-25T11:57:36 | 230,096,116 | 0 | 0 |
MIT
| 2019-12-25T11:54:03 | 2019-12-25T11:54:02 | null |
UTF-8
|
Python
| false | false | 1,833 |
py
|
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import, division
import unittest
import dictalchemy.tests as tests
from dictalchemy import make_class_dictable
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer
engine = create_engine('sqlite:///:memory:', echo=False)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(engine)
class MakeClassDictable(Base):
__tablename__ = 'makeclassdictable'
id = Column(Integer, primary_key=True)
name = Column(String)
def __init__(self, name):
self.name = name
class TestAsdict(unittest.TestCase):
def setUp(self):
""" Recreate the database """
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
self.session = Session()
def tearDown(self):
Base.metadata.drop_all()
def test_make_class_dictable(self):
assert not hasattr(MakeClassDictable, 'asdict')
m = MakeClassDictable('dictable')
self.session.add(m)
self.session.commit()
assert not hasattr(m, 'asdict')
make_class_dictable(MakeClassDictable)
assert m.asdict() == {'id': m.id, 'name': m.name}
class TestMakeDictable(tests.TestCase):
def test_dict(self):
named = tests.Named('a name')
self.session.add(named)
self.session.commit()
assert dict(named) == {'id': named.id, 'name': 'a name'}
def test_arg_to_dict():
from dictalchemy.utils import arg_to_dict
assert arg_to_dict(None) == {}
assert arg_to_dict([]) == {}
assert arg_to_dict(['a', 'b']) == {'a': {}, 'b': {}}
assert arg_to_dict({
'a': {'is_a': True},
'b': {'is_b': True},
}) == {'a': {'is_a': True}, 'b': {'is_b': True}}
|
[
"[email protected]"
] | |
aaa174b7d5b9a39f124aded81196211c16574ee5
|
c31046b84964bf7a9843582a4e2ecd4e8e2ed51a
|
/Unit7Labs/Lab_7A.py
|
dca8f7029be90f39b0e5bf91399cf5549b7c31fc
|
[] |
no_license
|
Foxx2019/FoxxWaiss-Python
|
ecfb6d2e22c67be4171af138c48527d24bb091a9
|
f36dd9773df1e29f7d55f8cbfe89af6ecd5201aa
|
refs/heads/master
| 2021-07-18T20:34:41.677533 | 2019-01-17T17:11:09 | 2019-01-17T17:11:09 | 146,913,003 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,243 |
py
|
class Petclass():
petType= "cage free pet"
def __init__(self, pType, pName, pBreed):
self.type = pType
self.name = pName
self.breed = pBreed
def getName(self):
return(str(self.name))
def getBreed(self):
return(str(self.breed))
def whatItIs(self):
print(self.type, self.name, self.breed)
class Cageclass():
petType= " caged pet"
def __init__(self, pType, pDanger):
self.type = pType
self.danger = pDanger
def whatDanger(self):
if self.danger == "T":
return("You have a dangerous " + self.type + ".")
if self.danger == "F":
return ("You have a safe " + self.type + ".")
myPet1 = Petclass("Dog","Skipper","Golden Retriever")
print("The pet's name is " + myPet1.name + " and it is a " + myPet1.type)
myPet2 = Petclass("Cat", "Wiskers", "Siberian" )
print("This pet's name is " + myPet2.name + " and it is a " + myPet2.type)
myCage1 = Cageclass("Snake", "T" )
print("This is a " + myCage1.type)
print("This is a" + myCage1.petType)
print(myCage1.whatDanger())
myCage2 = Cageclass("Rat", "F" )
print( "This is a " + myCage2.type)
print("This is a" + myCage2. petType)
print(myCage2. whatDanger())
|
[
"[email protected]"
] | |
e615006a23c81dc60b0a5cdc99d864b0a4c4a7d4
|
c724fad90be2e221cb0f5c0005ebcfbdfdb35d27
|
/backend/fitsii_19945/wsgi.py
|
cfa9f31b691c6399a7797d950bc243dc2bb70ac9
|
[] |
no_license
|
crowdbotics-apps/fitsii-19945
|
d461349a510febd39f4edcaeb2b8b722664e3bf0
|
040621b4053e58b9c323ef7222a6a36465c4806e
|
refs/heads/master
| 2022-12-07T18:18:50.580128 | 2020-09-02T16:56:11 | 2020-09-02T16:56:11 | 292,342,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 401 |
py
|
"""
WSGI config for fitsii_19945 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fitsii_19945.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
e1b14ee4ee75a098c5be24097ca8b03f11ca23c0
|
2e2e37a8e2cde7a63b9e462ca2fbc2a067f20be1
|
/packt.py
|
bc2f5f4a565cd4903616bbf762557bb2f81ee2b9
|
[] |
no_license
|
xhimalaya/system_setup
|
678f938a58e6638c94df132fe3fb9a14b0e9dfd1
|
1b0a1f2a28c77c18116d6fb1dc49b75652b947d6
|
refs/heads/master
| 2022-11-15T07:11:54.705088 | 2020-07-06T16:51:47 | 2020-07-06T16:51:47 | 254,872,015 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,104 |
py
|
import scapy.all as scapy  # sniff(), Raw and friends live in scapy.all, not the top-level package
from scapy_http import http
import argparse
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--interface", dest="interface",
help="Interface name")
options = parser.parse_args()
return options
def sniff_packet(interface):
scapy.sniff(iface=interface, store=False, prn=process_packets)
def get_url(packet):
    # Host and Path are bytes in Python 3; decode so the URL can be concatenated with str for printing
    return (packet[http.HTTPRequest].Host + packet[http.HTTPRequest].Path).decode(errors="ignore")
def get_credentials(packet):
if packet.haslayer(scapy.Raw):
        load = packet[scapy.Raw].load.decode(errors="ignore")  # raw payload is bytes; decode before the substring checks below
keywords = ["login", "password", "username", "user", "pass"]
for keyword in keywords:
if keyword in load:
return load
def process_packets(packet):
if packet.haslayer(http.HTTPRequest):
url = get_url(packet)
print("[+] Http Request >> " + url)
credentials = get_credentials(packet)
if credentials:
print("[+] Possible username/passowrd " + credentials + "\n\n")
options = get_arguments()
sniff_packet(options.interface)
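# Usage sketch (assumed invocation; "eth0" is an example interface name, not from this repo):
#   sudo python packt.py -i eth0
# Requires the scapy and scapy-http packages; live sniffing usually needs root privileges.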
|
[
"[email protected]"
] | |
ec43d0a8ba6d2d861b171eb30b9c4e57b58132d4
|
723c5112e886f46b2ea20624ca114db53f86b2fb
|
/VCsite/mainapp/models.py
|
7f184e99c044f0fc87f1a296f7c446ed8aefc5a5
|
[] |
no_license
|
JungChaeMoon/MyProfile
|
2bba0c989135ff80d93e905a20c9f5e4726513bc
|
e756a4a95215648dcbb93ec47ee134d3c875ca40
|
refs/heads/master
| 2022-12-24T11:04:38.419171 | 2019-06-18T04:35:31 | 2019-06-18T04:35:31 | 190,045,463 | 0 | 0 | null | 2022-12-03T13:43:16 | 2019-06-03T16:50:19 |
HTML
|
UTF-8
|
Python
| false | false | 304 |
py
|
from django.db import models
from datetime import datetime
# Create your models here.
class Comment(models.Model):
comment_text = models.CharField(max_length=100)
    pub_date = models.DateTimeField('date published', default=datetime.now)  # pass the callable, not its result, so the timestamp is evaluated per save rather than once at import time
def __str__(self):
return self.comment_text
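# Usage sketch (hypothetical interactive shell session, not part of this app):
#   Comment.objects.create(comment_text="First comment")  # pub_date is filled in by the default callable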
|
[
"[email protected]"
] | |
36f60d587f725070a2ff8b01f994e92c4b097cc5
|
efe9ef79f0e38cefe0d896ef8f0fe8b6acd79234
|
/longestChain.py
|
07b1ed37e47aa18e08100c48acccd967ff8aec48
|
[] |
no_license
|
kchanhee/pythonpractice
|
ed97c30aaccd509836ec313bf75eee268d202be7
|
a2cd3a1930f0f8c4d4581b9071f7b1121a0b62c0
|
refs/heads/master
| 2020-12-30T12:35:36.514990 | 2017-11-20T08:00:18 | 2017-11-20T08:00:18 | 91,395,309 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 940 |
py
|
def longest_chain(w):
words = set()
for word in w:
words.add(word)
max_chain = 0
for word in words:
if len(word) <= max_chain: # skip word if it cannot be greater than max_chain
continue
max_candidate = find_longest_chain(word, words, 0, [ 0 ])
max_chain = max(max_candidate, max_chain)
return max_chain
def find_longest_chain(word, words, current_chain, max_chain):
if word not in words: # set: O(1) --> better than list: O(n)
return 0
current_chain += 1
    max_chain[0] = max(max_chain[0], current_chain)  # keep the longest chain seen on any branch, not just the most recent one
for i in range(len(word)):
new_word = word[:i] + word[i+1:]
find_longest_chain(new_word, words, current_chain, max_chain)
return max_chain[0]
if __name__ == "__main__":
w = [ "a", "b", "ba", "bca", "bda", "bdca" ]
print(longest_chain(w))
w = [ "bdcafg", "bdcaf", "a", "b", "ba", "bca", "bda", "bdca" ]
print(longest_chain(w))
|
[
"[email protected]"
] | |
99d27433a0fd4c8152c28b420d73c12dabf8b703
|
131cb30e1e0a636e798ffed8dfc7395048f52f72
|
/python/conferencerates.py
|
3097d0c2b2e3f6c84b37d99ec357831d22eb02dd
|
[
"Unlicense"
] |
permissive
|
robotprogrammer22/Astro-Budget-Site
|
fef31af181dd58c2698021183e74cd6a8bb4750e
|
b30aaf8a86b1a5d2f184b5a5506dfd939eade392
|
refs/heads/master
| 2020-06-03T03:17:11.224177 | 2019-06-28T16:44:31 | 2019-06-28T16:44:31 | 191,413,323 | 0 | 0 |
Unlicense
| 2019-06-11T16:50:41 | 2019-06-11T16:50:41 | null |
UTF-8
|
Python
| false | false | 742 |
py
|
from sqlalchemy import Column, Integer, String, Float, DateTime, ForeignKey, Sequence
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class ConferenceRates(Base):
__tablename__ = 'conferencerates'
conferencerateid = Column (Integer, Sequence('conferencerates_conferencerateid_seq'), primary_key=True)
conferenceid = Column (Integer, ForeignKey("conferences.conferenceid"), nullable=False)
effectivedata = Column (DateTime)
perdiem = Column (Float)
registration = Column (Float)
groundtransport = Column (Float)
airfare = Column (Float)
def __repr__(self):
return "<ConferenceRates(perdiem='%d', registration='%s')>" % (self.perdiem, self.registration)
|
[
"[email protected]@a6478e1e-ec95-11dc-b814-c3d4bc21ddcb"
] |
[email protected]@a6478e1e-ec95-11dc-b814-c3d4bc21ddcb
|
4eee527a569be132019dc5c3961bcede255e3ed6
|
4bdc163f846c0e9900fdc5a2a27e803d152afc49
|
/parser_.py
|
3b98bb4b0caaf7304f5aea96f928257783543d66
|
[] |
no_license
|
navedrizvi/PyLox
|
44822f6577a95a2424f1585c381740ade45c7a76
|
2e1d45f4e67e79e1b5ed92c663520534c6924d87
|
refs/heads/master
| 2023-07-17T14:57:33.557961 | 2021-08-07T11:53:09 | 2021-08-07T11:53:09 | 266,264,202 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,338 |
py
|
from token_ import Token, TokenType
from expr import Binary, Expr, Grouping, Literal, Unary
class ParseError(RuntimeError):
pass
class Parser:
current = 0
def __init__(self, tokens: [Token]):
self.tokens = tokens
def _expression(self) -> Expr:
return self._equality()
#Rule- equality → comparison ( ( "!=" | "==" ) comparison )*
def _equality(self) -> Expr:
expr = self._comparison()
while self._match('BANG_EQUAL', 'EQUAL_EQUAL'):
operator: Token = self._previous(
) #Since match() advances the parser
right: Expr = self._comparison()
expr = Binary(expr, operator, right)
return expr
def _match(self, *types) -> bool:
for type_ in types:
if self._check(type_):
self._advance()
return True
return False
def _check(self, type_: TokenType) -> bool:
if self._is_at_end():
return False
return self._peek().token_type_repr == type_
def _advance(self) -> Token:
if not self._is_at_end():
self.current += 1
return self._previous()
def _is_at_end(self):
return self._peek().token_type_repr == 'EOF'
def _peek(self) -> Token:
return self.tokens[self.current]
def _previous(self) -> Token:
return self.tokens[self.current - 1]
#Rule- comparison → addition ( ( ">" | ">=" | "<" | "<=" ) addition )*
def _comparison(self) -> Expr:
expr = self._addition()
while self._match('LESS', 'LESS_EQUAL', 'GREATER', 'GREATER_EQUAL'):
operator = self._previous()
right = self._addition()
expr = Binary(expr, operator, right)
return expr
def _addition(self):
expr = self._multiplication()
while self._match('MINUS', 'PLUS'):
operator = self._previous()
right = self._multiplication()
expr = Binary(expr, operator, right)
return expr
def _multiplication(self):
expr = self._unary()
while self._match('SLASH', 'STAR'):
operator = self._previous()
right = self._unary()
expr = Binary(expr, operator, right)
return expr
#Rule- unary → ( "!" | "-" ) unary | primary
def _unary(self):
if self._match('BANG', 'MINUS'):
operator = self._previous()
right = self._unary()
return Unary(operator, right)
return self._primary()
#Rule- primary → NUMBER | STRING | "false" | "true" | "nil" | "(" expression ")"
def _primary(self):
if self._match('FALSE'):
return Literal(False)
if self._match('TRUE'):
return Literal(True)
        if self._match('NIL'):  # token type names are uppercase elsewhere in this parser
return Literal(None)
if self._match('NUMBER', 'STRING'):
return Literal(self._previous().literal)
if self._match('LEFT_PAREN'):
expr = self._expression()
self._consume('RIGHT_PAREN', "Expected ')' after expression.")
return Grouping(expr)
raise self._error(self._peek(), "Expected an expression.")
    def _consume(self, type_: TokenType, msg: str) -> Token:
        if self._check(type_):
return self._advance()
raise self._error(self._peek(), msg)
def _error(self, token, msg):
from lox import Lox
Lox.error_token(token, msg)
return ParseError()
def _synchronize(self):
self._advance()
while not self._is_at_end():
if self._previous().token_type_repr == 'SEMICOLON':
return
curr_token = self._peek().token_type_repr
if curr_token == 'CLASS':
pass
elif curr_token == 'FUN':
pass
elif curr_token == 'VAR':
pass
elif curr_token == 'FOR':
pass
elif curr_token == 'IF':
pass
elif curr_token == 'WHILE':
pass
elif curr_token == 'PRINT':
pass
elif curr_token == 'RETURN':
pass
self._advance()
def parse(self):
try:
return self._expression()
except ParseError:
return None
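# Usage sketch (the Scanner class is an assumption borrowed from a typical Lox scanner module,
# not something defined in this file):
#   tokens = Scanner("1 + 2 * 3").scan_tokens()
#   expression = Parser(tokens).parse()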
|
[
"[email protected]"
] |