blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-281) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-57) | license_type (stringclasses, 2 values) | repo_name (stringlengths 6-116) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 313 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, ⌀) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (stringclasses, 17 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 107 values) | src_encoding (stringclasses, 20 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.02M) | extension (stringclasses, 78 values) | content (stringlengths 2-6.02M) | authors (listlengths 1-1) | author (stringlengths 0-175) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a292f0646f44750049a15d70ad355287e0aa934b
|
7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0
|
/0301-0400/0388-Longest Absolute File Path/0388-Longest Absolute File Path.py
|
a86a2ef91fb97202c7e1d7bd2e4cdf25e89d83c6
|
[
"MIT"
] |
permissive
|
jiadaizhao/LeetCode
|
be31bd0db50cc6835d9c9eff8e0175747098afc6
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
refs/heads/master
| 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 |
MIT
| 2020-10-02T12:47:47 | 2017-08-08T05:57:26 |
C++
|
UTF-8
|
Python
| false | false | 544 |
py
|
class Solution:
def lengthLongestPath(self, input: str) -> int:
lens = [0]
maxLen = 0
for line in input.splitlines():
name = line.lstrip('\t')
level = len(line) - len(name)
if '.' in name:
maxLen = max(maxLen, lens[level] + len(name))
else:
if level + 1 == len(lens):
lens.append(lens[-1] + 1 + len(name))
else:
lens[level + 1] = lens[level] + 1 + len(name)
return maxLen
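# Added illustrative check (not part of the original file): for the classic input
# "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext" the longest absolute path is
# "dir/subdir2/file.ext", so Solution().lengthLongestPath(...) returns 20 (3 + 1 + 7 + 1 + 8).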
|
[
"[email protected]"
] | |
554e9c560c05e8744d177bda39f2ebc0fa78afc4
|
809bb5ff767470177dcc1452a129b36e720a92d8
|
/app.py
|
7c4d24440d7b9552659d62a38a36278d8cf733cc
|
[] |
no_license
|
luismorenolopera/GTA-San-Andreas-Cheats
|
cc3eb65eb07b7d782c4977b385b40be2fcf8c6c3
|
4264fa0af8785df6968f1d2c80f6cb5cd27a84c9
|
refs/heads/master
| 2020-03-26T22:36:52.671801 | 2018-08-21T02:50:48 | 2018-08-21T02:50:48 | 145,473,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,552 |
py
|
from flask import jsonify, Flask
from flask_cors import CORS
import ctypes
import time
SendInput = ctypes.windll.user32.SendInput
KEYBOARD_LAYOUT = {
'a': 0x1E,
'b': 0x30,
'c': 0x2E,
'd': 0x20,
'e': 0x12,
'f': 0x21,
'g': 0x22,
'h': 0x23,
'i': 0x17,
'j': 0x24,
'k': 0x25,
'l': 0x26,
'm': 0x32,
'n': 0x31,
'o': 0x18,
'p': 0x19,
'q': 0x10,
'r': 0x13,
's': 0x1F,
't': 0x14,
'u': 0x16,
'v': 0x2F,
'w': 0x11,
'x': 0x2D,
'y': 0x15,
'z': 0x2C
}
PUL = ctypes.POINTER(ctypes.c_ulong)
class KeyBdInput(ctypes.Structure):
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time",ctypes.c_ulong),
("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
_fields_ = [("type", ctypes.c_ulong),
("ii", Input_I)]
def PressKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput(0, hexKeyCode, 0x0008 | 0x0002, 0,ctypes.pointer(extra))
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
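# Added note (not in the original file): in KeyBdInput.dwFlags, 0x0008 is KEYEVENTF_SCANCODE
# (wScan carries a hardware scan code from KEYBOARD_LAYOUT above) and 0x0002 is KEYEVENTF_KEYUP,
# so ReleaseKey sends the matching key-up event for the key pressed by PressKey.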
def write_cheat(code):
for char in code:
if char in KEYBOARD_LAYOUT:
PressKey(KEYBOARD_LAYOUT[char])
time.sleep(0.05)
ReleaseKey(KEYBOARD_LAYOUT[char])
app = Flask(__name__)
CORS(app)
@app.route("/<code>")
def hello(code):
try:
write_cheat(code)
except Exception as e:
raise e
return jsonify({'message': 'Truco activado'})
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
|
[
"[email protected]"
] | |
df5d678d5bb80cd69c6c31ce24c0d9c56aa58f17
|
389db82b0ec1708856a328c5b6a48ac11515da80
|
/tests/test_innovation_tracker.py
|
056ef9e529fcfdbd653d04dfbb8ee19484bb3fd6
|
[
"MIT"
] |
permissive
|
seallard/walker
|
ba728dce7dcb45e8ee25be0e488ca4cee29e16eb
|
a2f0cdcba72f3756b80aadcc9e2de20160b567d8
|
refs/heads/develop
| 2023-02-24T00:29:42.427867 | 2021-01-30T18:20:59 | 2021-01-30T18:20:59 | 280,452,176 | 1 | 0 |
MIT
| 2020-11-20T10:00:55 | 2020-07-17T14:56:25 |
Python
|
UTF-8
|
Python
| false | false | 1,905 |
py
|
from neat.genome import Genome
from neat.innovation_tracker import InnovationTracker
from neat.enums.node_types import NodeType
from unittest.mock import Mock
def test_initialisation(standard_config):
tracker = InnovationTracker(standard_config)
assert tracker.node_innovations == {}
assert tracker.link_innovations == {}
assert tracker.next_link_id == 0
assert tracker.next_node_id == 3
def test_single_node_innovation(genome):
tracker = genome.tracker
genome.mutate_add_node()
assert len(tracker.node_innovations) == 1, "one node innovation"
assert len(tracker.link_innovations) == len(genome.links), "two link innovations"
assert genome.nodes[-1].id == tracker.next_node_id - 1, "correct node id assigned"
assert genome.links[-2].id == tracker.next_link_id - 2, "correct id assigned to in link"
assert genome.links[-1].id == tracker.next_link_id - 1, "correct id assigned to out link"
def test_multiple_node_innovations(genome):
tracker = genome.tracker
genome.mutate_add_node()
genome.mutate_add_node()
assert len(tracker.node_innovations) == 2, "two node innovations added"
assert len(tracker.link_innovations) == len(genome.links), "four link innovations added"
def test_existing_node_innovation(standard_config):
tracker = InnovationTracker(standard_config)
genome_1 = Genome(id=1, config=standard_config, tracker=tracker)
genome_2 = Genome(id=2, config=standard_config, tracker=tracker)
genome_1.mutate_add_node()
genome_2.mutate_add_node()
assert len(genome_1.nodes) == len(genome_2.nodes)
assert genome_1.nodes[-1].id == genome_2.nodes[-1].id, "node received existing innovation id"
assert genome_1.links[-2].id == genome_2.links[-2].id, "in link received existing innovation id"
assert genome_1.links[-1].id == genome_2.links[-1].id, "out link received existing innovation id"
|
[
"[email protected]"
] | |
fcf75f039f6043a33f23f04dd0cea6d963dd148a
|
1d23383666131f26898088b162cb2b5354b29a78
|
/Project/www/model/component/employee_operate.py
|
2c3fd2240456e10e552309e1226e4190b5565765
|
[] |
no_license
|
PhoebeLxx/BDICCOMP3030JG6666
|
38b8439a908d9ad3a03d77e87b317ef3eb36dce4
|
acd62c402bc5a7441a86495e5e86241cc5ff5d6c
|
refs/heads/master
| 2020-05-07T22:28:55.115441 | 2019-04-12T02:22:14 | 2019-04-12T02:22:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,057 |
py
|
from www.model.component.database_basic.whats_your_name import Employee,db
def search_id(id):
return Employee.query.filter_by(id = id).first()
def login(id, password):
emp = search_id(id)
assert(emp is not None),'No such id'
assert(emp.check_password_hash(password)),'Wrong password'
return "Login successfully"
def create(id, password):
emp = search_id(id)
    assert(emp is None),'Already exist'  # creation requires that the id is not already taken
db.session.add(Employee(id = id, password = password))
db.session.commit()
return 'Create employee Successfully'
def update_password(id, new_password):
emp = Employee.query.filter_by(id=id).first()
assert (emp is not None), "No such employee"
assert (not emp.check_password_hash(new_password)),"New password is same as old Password"
emp.password = new_password
return 'Update password successfully'
def delete(id):
emp = Employee.query.filter_by(id=id).first()
assert (emp is not None), "No such employee"
db.session.delete(emp)
db.session.commit()
return 'successful'
|
[
"[email protected]"
] | |
6ac71a91aa708c87b3a472cdd872779881393794
|
c956106b4cdfb38711406945a9e0014a2536f2f2
|
/example/management/grabbers/multi_start.py
|
50564a3793a953e0f980d3c66246dd390d7b5c97
|
[] |
no_license
|
kyromen/FED
|
641f83e90b2b4cfc0a9c882d069fda501d8ec2d0
|
626598db4bf8cc88d318bba657ce62fa47dc1240
|
refs/heads/master
| 2020-03-27T04:28:31.796315 | 2014-11-12T16:29:34 | 2014-11-12T16:29:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,275 |
py
|
# -*- coding: utf-8 -*-
import multiprocessing as mp
def multi_start(grabber_cls, browser_name='phantomjs', quantity_browsers=None):
grabber = grabber_cls(0, browser_name)
pages = grabber.grab_count_of_pages('future')
grabber.driver.quit()
nCPU = mp.cpu_count()
if quantity_browsers is None:
quantity_browsers = nCPU * 2
if pages < quantity_browsers:
tasks = pages
else:
        tasks = pages // 2  # integer division keeps tasks an int for range() and % below
while 1:
if tasks / quantity_browsers < 1:
quantity_browsers -= 1
else:
break
jobers = quantity_browsers
    q = pages // tasks
r = pages % tasks
jobs = []
start_page = 0
for i in range(tasks):
count_of_pages = q
if r > 0:
count_of_pages += 1
r -= 1
if count_of_pages == 0:
break
jobs.append([start_page, count_of_pages])
start_page += count_of_pages
queue = mp.JoinableQueue()
for job in jobs:
queue.put(job)
for i in range(jobers):
queue.put(None)
workers = []
for i in range(jobers):
worker = mp.Process(target=grabber.start, args=[queue, grabber_cls, browser_name])
workers.append(worker)
worker.start()
queue.join()
|
[
"[email protected]"
] | |
4b556bd0fc09b70d3a213a1229ffaa2c3d18a95a
|
e2898ed9a1ad76dbe092918c0e284ad0f9e47c8e
|
/Session_2/2-read-angle-sensor-on-A1/code.py
|
c5a037d387e6cd3a989dbbf4b8549db7160bd0ab
|
[] |
no_license
|
paulmirel/MICA-EIR-workshop-fall-2020
|
a379c468e5be6a32f824864618be24f63639f611
|
38cdc0dc601635e86f774db001ae9a7692ea559f
|
refs/heads/main
| 2023-01-23T09:21:40.536916 | 2020-11-15T21:03:37 | 2020-11-15T21:03:37 | 308,174,036 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 264 |
py
|
import time
import board
import analogio
angle_sensor = analogio.AnalogIn(board.A1)
while True:
    angle_normalized = round( angle_sensor.value/ 65536, 3) # the raw value runs from 0 to 65535 (2**16 - 1), i.e. a 16-bit reading
print( angle_normalized )
time.sleep( 0.2 )
|
[
"[email protected]"
] | |
f4b6b3b54722f1bf13e0ee05342a1ed5d29e92b5
|
738234d0787ead3ea2e6c4f284b2971455f36aed
|
/src/customers/migrations/0001_initial.py
|
0cf4fd601c7214bda5f0a49bc9cfe0edbf9cac7d
|
[] |
no_license
|
Housechowks/python_inventory_management_system
|
9d8383e37a1f5b0c26e04925e2b5448a79096e8a
|
a993a6cc3dd04dde21337c9ba7eeaadbddd444e9
|
refs/heads/master
| 2023-08-10T11:37:52.897412 | 2021-09-23T06:07:01 | 2021-09-23T06:07:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 584 |
py
|
# Generated by Django 3.2.6 on 2021-08-25 19:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='custmer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
('logo', models.ImageField(default='no_picture.jpg', upload_to='customers')),
],
),
]
|
[
"[email protected]"
] | |
ec1dd7f6d10aabdf5b4535629a198a0f7fcf03ef
|
7d0eca00fe04585ab42d6237f3c59e37ef578ea9
|
/jair_work_step_four_evaluation/anomalous_roc_auc.py
|
d43b4e52c32342020e04195b9f6f3d4d1cb42cc3
|
[] |
no_license
|
gaohuiru/jair_anomaly_detection
|
43d7189161dfc9bdcacb74758901b32c9a171d87
|
35467fac52eec9959c67521d3053bda1fd32b512
|
refs/heads/master
| 2023-07-31T08:18:13.665671 | 2021-09-10T00:09:54 | 2021-09-10T00:09:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,841 |
py
|
import sys
sys.path.append("../time_series")
sys.path.append("../anomaly_detection_methods")
import anomaly_detection_methods_helpers as ah
from anomalous_method import anomalous
from time_series import TimeSeries
from os import listdir
from os.path import isfile, join
import joblib
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, auc
import math
import matplotlib.pyplot as plt
num_scores = 10
gaussian_window_sizes = [128, 256,512,768,1024]
mypath = "../jair_work_step_one_determine_characteristics/"
for f in listdir(mypath):
roc_auc_dict_list = []
auc_list = []
for score_number in range(0,num_scores):
for gaussian_window_size in gaussian_window_sizes:
if "ts_object" in f:
ts = joblib.load(mypath + f)
# parameter section
if ts.get_length() > 1000:
ts_length = 100
else:
ts_length = 25
# see https://github.com/robjhyndman/anomalous-acm/issues/4
if ts.name in ["art_daily_flatmiddle_filled.csv", "ambient_temperature_system_failure_nofill.csv"]:
ts_length = 500
name = ts.name
result_dict = joblib.load("../jair_work_step_three_anomaly_detection/anomalous_scores/anomalous_scores_" + str(score_number) + "_" + name + "_ts_length_" + str(ts_length))
scores = [0 if math.isnan(x) else float(x) for x in result_dict["Anomaly Scores"]]
y = [0 if math.isnan(x) else int(x) for x in ts.dataframe["outlier"].values]
fpr, tpr, thresholds = roc_curve(y, scores, pos_label=1)
roc_auc = auc(fpr, tpr)
roc_auc_dict = {"TS Name": ts.name, "FPRS": fpr, "TPRS": tpr, "Thresholds": thresholds, "AUC": roc_auc, "Dict Name": "anomalous_roc_auc_" + str(score_number) + "_" + name + "_ts_length_" + str(ts_length)}
roc_auc_dict_name = roc_auc_dict["Dict Name"]
joblib.dump(roc_auc_dict, "anomalous_roc_auc_all/" + roc_auc_dict_name)
roc_auc_dict_list.append(roc_auc_dict)
auc_list.append(roc_auc)
if roc_auc_dict_list:
# print(auc_list)
max_index = auc_list.index(max(auc_list))
# print(max_index)
best_roc_auc_dict = roc_auc_dict_list[max_index]
fpr = best_roc_auc_dict["FPRS"]
tpr = best_roc_auc_dict["TPRS"]
roc_auc = best_roc_auc_dict["AUC"]
roc_auc_dict_name = best_roc_auc_dict["Dict Name"]
# print(best_roc_auc_dict["Thresholds"])
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC (AUC = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Anomalous ROC for ' + ts.name[:-4])
plt.legend(loc="lower right")
plt.tight_layout()
plt.show()
# save the one with the best auc in the best folder
# joblib.dump(best_roc_auc_dict, "anomalous_roc_auc_best/" + roc_auc_dict_name)
|
[
"[email protected]"
] | |
10c8b4295839d66871d8a08b1a8e0fc7c5fa0f74
|
23096da6b8963968d4434c86b9c9b74f30e5f7fe
|
/src/controller/aa_controller.py
|
48cdf3560a9f42091f00b4043beb4f73c1d91f4f
|
[] |
no_license
|
moajo/GitRPG
|
a846f6bdb5dfc5351c1d9c3a79a097382b768a59
|
25cb83eef33d5b6d219f9ca23b1ec6c9e56b2079
|
refs/heads/master
| 2021-06-24T15:36:27.548674 | 2017-09-12T17:29:00 | 2017-09-12T17:29:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 613 |
py
|
#!/usr/bin/env python
import os
def game_over_aa():
with open(os.path.dirname(os.path.abspath(__file__)) + "/../../ascii_art/game_over.txt", "r") as file:
return file.read()
def level_up_aa():
with open(os.path.dirname(os.path.abspath(__file__)) + "/../../ascii_art/level_up.txt", "r") as file:
return file.read()
def push_force_aa():
# with open(os.path.dirname(os.path.abspath(__file__)) + "/../../ascii_art/explosion.ansi", "r") as file:
with open(os.path.dirname(os.path.abspath(__file__)) + "/../../ascii_art/explosion_100.txt", "r") as file:
return file.read()
|
[
"[email protected]"
] | |
ed4f37ae58ddbf794be73cb6a56f2fab06722fda
|
ceaa7e3a939f94a460e5b6578e9c1afbd88bb822
|
/main/migrations/0018_auto_20201218_1226.py
|
42039853c0dd77d265c0cc2ee5e6622731f7a604
|
[] |
no_license
|
abdelrhman-adel-ahmed/Enhanced_Blog
|
881948224a21e080733f826d6cce75caacb3c4b7
|
d234c3061b2f23a38d255e9ad39c57a1040b5bca
|
refs/heads/master
| 2023-07-18T01:03:34.674246 | 2021-08-27T08:17:58 | 2021-08-27T08:17:58 | 322,244,847 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 382 |
py
|
# Generated by Django 3.1.3 on 2020-12-18 10:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0017_auto_20201217_1622'),
]
operations = [
migrations.AlterField(
model_name='post',
name='featured',
field=models.BooleanField(null=True),
),
]
|
[
"[email protected]"
] | |
c30fab634121af94fb9e9d5724cb9fed36d15892
|
b310bd767af1a6207ad2a8b19a3611fadc545f8d
|
/static/DB/read_excel.py
|
26b231aadcba0466429f236a8e38ebb5e183c923
|
[] |
no_license
|
thenam8293/heroku_sm
|
87138d69c926ed92b6f2048c0dcfba262e803be4
|
eecf3b7539876fa621c58114a22eb87749964773
|
refs/heads/master
| 2020-03-09T09:10:55.954603 | 2018-04-14T04:55:19 | 2018-04-14T04:55:19 | 128,706,668 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,894 |
py
|
# -*- coding: utf8 -*-
import pypyodbc
import xlrd
import datetime as dt
import sqlite3
# def sql(query,var=''):
# # connection = pypyodbc.connect('Driver={SQL Server};Server=10.62.24.161\SQLEXPRESS;Database=web_cong_viec_amc;uid=aos;pwd=aos159753')
# # connection = pypyodbc.connect('Driver={SQL Server};Server=10.62.24.161\SQLEXPRESS;Database=AMC_B;uid=aos;pwd=aos159753')
# # connection = pypyodbc.connect('Driver={SQL Server};Server=10.62.24.161\SQLEXPRESS;Database=web_cong_viec_amc;uid=aos;pwd=aos159753')
# connection = pypyodbc.connect('Driver={SQL Server};Server=10.62.24.161\SQLEXPRESS;Database=WEB_CONG_VIEC;uid=aos;pwd=aos159753')
# # connection = pypyodbc.connect('Driver={SQL Server};Server=10.62.24.123,1433\SQL2008;Database=web_cong_viec_amc_mien_nam;uid=phunq;pwd=aos159753')
# cursor = connection.cursor()
# cursor.execute(query,var)
# if query.lower()[:6] == 'select':
# x = cursor.fetchall()
# cursor.close()
# return x
# else:
# cursor.commit()
# cursor.close()
def sqlite(query,var=''):
connection = sqlite3.connect(r'sm_tool.db')
cursor = connection.cursor()
cursor.execute(query,var)
if query.lower()[:6] == 'select':
x = cursor.fetchall()
connection.close()
return x
elif query.lower()[:6] == 'create':
connection.close()
else:
connection.commit()
connection.close()
def str_to_dt(x):
try:
return dt.datetime.strptime(x,'%H:%M %d/%m/%Y')
except:
return dt.datetime.strptime(x,'%d/%m/%Y')
# n = xlrd.open_workbook(r'vincom.xlsx')
# sheet = n.sheet_by_index(0)
# print (sheet.nrows)
# for i in range(1,sheet.nrows):
# name1 = sheet.cell(i,0).value
# name2 = sheet.cell(i,1).value
# name3 = sheet.cell(i,2).value
# name4 = sheet.cell(i,3).value
# name5 = sheet.cell(i,4).value
# name6 = sheet.cell(i,5).value
# name7 = sheet.cell(i,6).value
# name8 = sheet.cell(i,7).value
# name9 = sheet.cell(i,8).value
# name10 = sheet.cell(i,9).value
# r = [name1, name2, name3, name4, name5, name6, name7, name8, name9, name10]
# sqlite("INSERT INTO bds_lien_ke_bt values({})".format(",".join(["?"]*10)), r)
n = xlrd.open_workbook(r'bt.xlsx')
sheet = n.sheet_by_index(0)
print (sheet.nrows)
for i in range(1,sheet.nrows):
print (i)
name1 = sheet.cell(i,0).value
name2 = sheet.cell(i,1).value
name3 = sheet.cell(i,2).value
name4 = sheet.cell(i,3).value
name5 = sheet.cell(i,4).value
name6 = sheet.cell(i,5).value
name7 = sheet.cell(i,6).value
name8 = sheet.cell(i,7).value
name9 = sheet.cell(i,8).value
name10 = sheet.cell(i,9).value
r = [name1, name2, name3, name4, name5, name6, name9, name10, name7, name8]
sqlite("INSERT INTO bds_lien_ke_bt values({})".format(",".join(["?"]*10)), r)
|
[
"[email protected]"
] | |
eb407744e889320793ada3b0ace22d79e6e08dcb
|
4419f60ab3430b4b9f3b5cca45faeb012eff2949
|
/Day18/Day18copy.py
|
b4aeda58ca5c2f0c8bd86f9f5043c47733e0cbf4
|
[] |
no_license
|
Bumbi54/Advent2019
|
4be5c8eb5275300911f07860c9b1460bf7b12c8a
|
1a9f86b083ebee05d87b1ded3734fe16b7282a6d
|
refs/heads/master
| 2020-09-23T01:18:12.562240 | 2020-02-20T09:09:00 | 2020-02-20T09:09:00 | 225,364,315 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,394 |
py
|
import time
import operator
from collections import deque
def readInput(fileName):
"""
Read input file and parse it into a string
:param fileName: name of input file
:return: list of input file content (each line is new element)
"""
with open(fileName, 'r') as file:
fileContent = file.readlines()
return fileContent
def parseInputFile(inputList):
"""
Parse input file into coordinate system, and extract keys from it.
"""
keyDict = {}
doorDict = {}
caveSystem = {}
x = 0
y = 0
startPosition = (0, 0)
for line in inputList:
for location in line:
if location == "@":
startPosition = (x ,y)
if location != "#" and location != "\n":
if "a" <= location <= "z":
keyDict[location] = (x, y)
elif "A" <= location <= "Z":
doorDict[location] = (x, y)
caveSystem[(x, y)] = location
if location == "\n":
y = 0
x += 1
else:
y += 1
return caveSystem, keyDict, doorDict, startPosition
class Path():
def __init__(self, currentLocation, visitedLocations, visitedKeys, length ):
self.currentLocation = currentLocation
self.visitedLocations = visitedLocations
self.visitedKeys = visitedKeys
self.length = length
def collectKeys(caveSystem, keyDict, doorDict, startPosition):
"""
Find shortest path that find all of the keys in cave.
"""
queue = deque()
startPath = Path(startPosition, set([startPosition]), set(), 0)
print(startPosition)
caveSystem[startPosition] = "."
queue.append(startPath)
directions = [
(0, 1),
(1, 0),
(-1, 0),
(0, -1)
]
resultLength = [9999999999999999999999999]
cache ={}
distanceCache = {}
while queue:
currentPath = queue.pop()
keyString = list(currentPath.visitedKeys)
keyString.sort()
keyString = "".join(keyString)
#print(f"location: {currentPath.currentLocation}, keys: {currentPath.visitedKeys}, length: {currentPath.length}, visitedLocations: {currentPath.visitedLocations}")
#print(f"len:{len(queue)}")
#print(f"distanceCache:{distanceCache}")
if min(resultLength) > currentPath.length and (keyString not in cache.get(currentPath.currentLocation, []) or currentPath.length < distanceCache.get((keyString, currentPath.currentLocation), 9999999999999999)):
if currentPath.currentLocation in cache:
cache[currentPath.currentLocation].append(keyString)
else:
cache[currentPath.currentLocation] = [keyString]
distanceCache[(keyString, currentPath.currentLocation)] = currentPath.length
for direction in directions:
flag = True
newPosition = tuple(map(operator.add, direction, currentPath.currentLocation))
if newPosition not in currentPath.visitedLocations and caveSystem.get(newPosition, "#") != "#":
visitedKeys = currentPath.visitedKeys.copy()
locations = currentPath.visitedLocations.copy()
if "a" <= caveSystem.get(newPosition) <= "z":
if caveSystem.get(newPosition) not in visitedKeys:
locations = set()
visitedKeys.add(caveSystem.get(newPosition))
#print(len(visitedKeys), len(keyDict.keys()))
if len(visitedKeys) == len(keyDict.keys()):
resultLength.append(currentPath.length + 1)
print(currentPath.length + 1, min(resultLength))
flag = False
if flag and (("A" <= caveSystem.get(newPosition) <= "Z" and caveSystem.get(newPosition).lower() in visitedKeys)
or caveSystem.get(newPosition) == "@" or caveSystem.get(newPosition) == "." or
("a" <= caveSystem.get(newPosition) <= "z") ):
locations.add(newPosition)
#print(f"Addnew. newPosition: {newPosition}, direction: {direction}")
newPath = Path(newPosition, locations, visitedKeys, currentPath.length + 1)
queue.append(newPath)
return resultLength
def collectKeys2(caveSystem, keyDict, doorDict, startPosition):
"""
Find shortest path that find all of the keys in cave.
"""
queue = deque()
startPath = Path(startPosition, set([startPosition]), set(), 0)
queue.append(startPath)
directions = [
(0, 1),
(1, 0),
(-1, 0),
(0, -1)
]
resultLength = []
while queue:
currentPath = queue.pop()
for direction in directions:
flag = True
newPosition = tuple(map(operator.add, direction, currentPath.currentLocation))
print(f"currentPath.currentLocation:{currentPath.currentLocation}, newPosition: {newPosition}")
#time.sleep(1)
if newPosition not in currentPath.visitedLocations and caveSystem.get(newPosition, "#") != "#":
print("Am i here")
visitedKeys = currentPath.visitedKeys.copy()
locations = currentPath.visitedLocations.copy()
if "a" <= caveSystem.get(newPosition) <= "z":
if caveSystem.get(newPosition) not in visitedKeys:
locations = set()
visitedKeys.add(caveSystem.get(newPosition))
print(len(visitedKeys), len(keyDict.keys()))
if len(visitedKeys) == len(keyDict.keys()):
print(f"Hope: {currentPath.length + 1}")
resultLength.append(currentPath.length + 1)
flag = False
if flag and (("A" <= caveSystem.get(newPosition) <= "Z" and caveSystem.get(newPosition).lower() in visitedKeys)
or caveSystem.get(newPosition) == "@" or caveSystem.get(newPosition) == "." or
("a" <= caveSystem.get(newPosition) <= "z") and caveSystem.get(newPosition) ):
print("Adding new to queue")
locations.add(newPosition)
newPath = Path(newPosition, locations, visitedKeys, currentPath.length + 1)
queue.append(newPath)
return resultLength
if __name__ == "__main__":
inputList = readInput("input.txt")
print(f"readInput: {inputList}")
caveSystem, keyDict, doorDict, startPosition = parseInputFile(inputList)
print(f"parseInputFile, caveSystem:{caveSystem}, keyDict:{keyDict}, doorDict: {doorDict}, startPosition: {startPosition}")
resultLength = collectKeys( caveSystem, keyDict, doorDict, startPosition)
print(f"collectKeys: {resultLength}")
print(f"collectKeys: {min(resultLength)}")
|
[
"[email protected]"
] | |
e19a2fdc668d2afa05c73abad92ebda28b69684e
|
edd28a06e0d212293c64ea0abb2190fca619eb31
|
/eval.py
|
a2fb671d6bf999ab93fe1c4ac1de94253751dc4e
|
[] |
no_license
|
sagittefrat/Information-Retrieval-competition
|
e64595237db9b7e64b4438bf78141bd67cc0ab3a
|
c1394dadb38070adc301462564e4c282232a2f24
|
refs/heads/master
| 2021-01-01T13:29:42.163977 | 2020-04-09T13:36:47 | 2020-04-09T13:36:47 | 239,299,121 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,228 |
py
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
sample_queries=xrange(301,351)
eval_array=np.zeros(shape=(50))
res_file_old=pd.read_csv('res_file_1000', sep=' ', names=['query', 'b1','docno','b2','b3','b4'])
res_file_new=pd.read_csv('new_relevant_docs', sep=' ',names=['query', 'b1','docno','b2','b3','b4'])
def eval(file1, file2) :
for query in sample_queries:
'''file1[file1['query']==query].to_csv('temp_res', sep=' ', header=None, index=False)
os.system('../trec_eval_9.0/trec_eval qrels_50_Queries temp_res | grep all | grep map > eval_file1')
eval_f1=pd.read_csv('eval_file1', sep= '\t', header=None, names=['name','all','value'])'''
file2[file2['query']==query].to_csv('temp_res', sep=' ', header=None, index=False)
os.system('../trec_eval_9.0/trec_eval qrels_50_Queries temp_res | grep all | grep map > eval_file2')
eval_f2=pd.read_csv('eval_file2', sep= '\t', header=None, names=['name','all','value'])
#eval_array[query-301] = ( int(query), eval_f1['value'][0], eval_f2['value'][0] )
eval_array[query-301] = ( eval_f2['value'][0] )
bla=pd.DataFrame(eval_array)
bla.to_csv('temp_res', sep='\t', header=['new'], index=False)
eval(res_file_old, res_file_new)
|
[
"[email protected]"
] | |
ec361046d8e8e510b12ba294598302bfab746f10
|
ad340bcc17c87e28078846c4630138a1156c86e4
|
/p030.py
|
11c461679b33698c6c010d098cd44989e4c325ea
|
[] |
no_license
|
galustian/euler
|
ea6b685c6f578682c546225cf6e477a904c03d8d
|
50ce86369cfb5d8e59388ed973b9e552e4387219
|
refs/heads/master
| 2020-04-23T03:30:06.767208 | 2019-06-16T15:31:05 | 2019-06-16T15:31:05 | 170,880,212 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,007 |
py
|
import time
def increment(number):
if number[-1] != 9:
number[-1] += 1
return
for i in reversed(range(0, len(number))):
if number[i] == 9 and i == 0:
number.append(0)
number[0], number[1:] = 1, [0] * (len(number)-1)
elif number[i] == 9:
number[i] = 0
else:
number[i] += 1
break
def to_number(number):
num_str = ''
for d in number:
num_str += str(d)
return int(num_str)
def pow_sum(number, power=5):
sum_ = 0
for d in number:
sum_ += d**power
return sum_
def compute():
sum_numbers = []
number = [1, 0]
# 9**5 * 6 == 354294
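    # (added note: 7 * 9**5 = 413343 has only six digits, so no number with seven or more
    #  digits can equal the sum of the fifth powers of its digits; hence the 354294 bound)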
while to_number(number) < 354_295:
num = to_number(number)
if num == pow_sum(number):
sum_numbers.append(num)
increment(number)
return sum(sum_numbers), sum_numbers
if __name__ == '__main__':
start = time.time()
print(compute())
print(time.time()-start)
|
[
"[email protected]"
] | |
07663a4e8be6728f443ba0d3c83963e1f88260a7
|
addc17385b371aea8ad109349cfed34b4c6194a7
|
/askisi1.py
|
ed28ffe7d56fc23610dbbd045f691cfb7bf89e1a
|
[] |
no_license
|
GeorgeXyp01/Python-Papei
|
0387d091df2e589042e426f4ded56e34d4a27ecb
|
b041fb86cffd0618b4daf2044823e2bf5f09ada7
|
refs/heads/master
| 2022-03-29T20:07:25.658035 | 2020-01-21T15:01:40 | 2020-01-21T15:01:40 | 234,324,069 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
py
|
f = open("file.txt", "r")
words = f.read().split()
vowels=["a", "e", "u", "o", "i", "y"]
def func(n):
return len(n)
for i in range(len(words)):
str = words[i]
list(str)
for x in str:
if x in vowels:
str = str.replace(x, "")
words[i]="".join(str)
words.sort(reverse=True, key=func)
for i in range(5):
print(words[i][::-1])
f.close()
|
[
"[email protected]"
] | |
0a545d6d5673a0f28df670d76f65a70863e87890
|
8c451e438739d741a127342e93727f3bac80b63e
|
/contributions/HARMONY 2021/test_gen_sedml.py
|
49fdc9d82e9772c2f6ac6a3f3baf4415b563de11
|
[] |
no_license
|
SED-ML/sedml-test-suite
|
a5d6c5858e81d615fa0ba7bcaa7d3af90ae55c47
|
853d8cdac8987bdf9b901936c3c8888455602212
|
refs/heads/master
| 2023-06-14T00:02:58.086947 | 2021-07-07T23:45:57 | 2021-07-07T23:45:57 | 47,284,156 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,424 |
py
|
r"""
####################################################################################################
tellurium 2.2.1
-+++++++++++++++++- Python Environment for Modeling and Simulating Biological Systems
.+++++++++++++++.
.+++++++++++++. Homepage: http://tellurium.analogmachine.org/
-//++++++++++++/. -:/-` Documentation: https://tellurium.readthedocs.io/en/latest/index.html
.----:+++++++/.++ .++++/ Forum: https://groups.google.com/forum/#!forum/tellurium-discuss
:+++++: .+:` .--++ Bug reports: https://github.com/sys-bio/tellurium/issues
-+++- ./+:-://. Repository: https://github.com/sys-bio/tellurium
.+. `...`
SED-ML simulation experiments: http://www.sed-ml.org/
# Change back to the original (with 'getName') when libsedml is fixed
sedmlDoc: L1V4
inputType: 'SEDML_STRING'
workingDir: 'C:\Users\Lucian\Desktop\tellurium'
saveOutputs: 'False'
outputDir: 'None'
plottingEngine: '<MatplotlibEngine>'
Windows-10-10.0.19041-SP0
python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)]
####################################################################################################
"""
import tellurium as te
from roadrunner import Config
from tellurium.sedml.mathml import *
from tellurium.sedml.tesedml import process_trace, terminate_trace, fix_endpoints
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
try:
import libsedml
except ImportError:
import tesedml as libsedml
import pandas
import os.path
Config.LOADSBMLOPTIONS_RECOMPILE = True
workingDir = r'C:\Users\Lucian\Desktop\tellurium'
# --------------------------------------------------------
# Models
# --------------------------------------------------------
# Model <model0>
model0 = te.loadSBMLModel(os.path.join(workingDir, 'hill.xml'))
# --------------------------------------------------------
# Tasks
# --------------------------------------------------------
# Task <task0>
# not part of any DataGenerator: task0
# Task <task1>
task1 = []
# Task: <task0>
task0 = [None]
model0.setIntegrator('cvode')
if model0.conservedMoietyAnalysis == True: model0.conservedMoietyAnalysis = False
__range__uniform_linear_for_n = np.linspace(start=1.0, stop=15.0, num=26)
for __k__uniform_linear_for_n, __value__uniform_linear_for_n in enumerate(__range__uniform_linear_for_n):
model0.reset()
model0['n'] = __value__uniform_linear_for_n
model0.timeCourseSelections = ['n', 'time', '[S2]']
model0.reset()
task0[0] = model0.simulate(start=0.0, end=35.0, steps=30)
task1.extend(task0)
# --------------------------------------------------------
# DataGenerators
# --------------------------------------------------------
# DataGenerator <plot_0_0_0>
__var__task1_____time = np.column_stack([sim['time'] for sim in task1])
if len(__var__task1_____time.shape) == 1:
__var__task1_____time.shape += (1,)
plot_0_0_0 = __var__task1_____time
# DataGenerator <plot_0_0_1>
__var__task1_____n = np.column_stack([sim['n'] for sim in task1])
if len(__var__task1_____n.shape) == 1:
__var__task1_____n.shape += (1,)
plot_0_0_1 = __var__task1_____n
# DataGenerator <plot_0_0_2>
__var__task1_____S2 = np.column_stack([sim['[S2]'] for sim in task1])
if len(__var__task1_____S2.shape) == 1:
__var__task1_____S2.shape += (1,)
plot_0_0_2 = __var__task1_____S2
# --------------------------------------------------------
# Outputs
# --------------------------------------------------------
# Output <plot_0>
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(num=None, figsize=(9, 5), dpi=80, facecolor='w', edgecolor='k')
from matplotlib import gridspec
__gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax = plt.subplot(__gs[0])
ax.pcolormesh(plot_0_0_0, plot_0_0_1, plot_0_0_2, color='#1f77b4', linewidth=1.5, alpha=1.0, label='task1.S2', cmap='RdBu', shading='auto')
ax.set_title('UniformTimecourse', fontweight='bold')
ax.set_xlabel('task1.time', fontweight='bold')
ax.set_ylabel('task1.n', fontweight='bold')
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.savefig(os.path.join(workingDir, 'plot_0.png'), dpi=100)
plt.show()
####################################################################################################
|
[
"[email protected]"
] | |
4f607029dc5f2cefac0e8a280a10a98d7d07073f
|
73b56f4333de6c63244b4e4e504ae187a520cb4d
|
/modules/mod_dirlist_v101.py
|
df302123b416376a1b92939fc0d723bd9cc77c24
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
sjh/automactc
|
d1e5cc9ac36d9de2a9eda0655af63be51a25c731
|
a9726a98fdc001d2a9331311d4a1eb3de4bd5fe1
|
refs/heads/master
| 2020-05-26T02:00:24.004055 | 2019-05-22T15:51:24 | 2019-05-22T15:51:24 | 188,069,194 | 1 | 0 | null | 2019-05-22T15:47:09 | 2019-05-22T15:47:09 | null |
UTF-8
|
Python
| false | false | 9,094 |
py
|
#!/usr/bin/env python
# IMPORT FUNCTIONS FROM COMMON.FUNCTIONS
from common.functions import stats2
from common.functions import get_codesignatures
from common.functions import read_stream_bplist
from common.functions import multiglob
# IMPORT STATIC VARIABLES FROM MAIN
from __main__ import inputdir
from __main__ import outputdir
from __main__ import forensic_mode
from __main__ import no_tarball
from __main__ import quiet
from __main__ import dirlist_include_dirs
from __main__ import dirlist_exclude_dirs
from __main__ import dirlist_no_multithreading
from __main__ import hash_alg
from __main__ import hash_size_limit
from __main__ import no_code_signatures
from __main__ import recurse_bundles
from __main__ import debug
from __main__ import archive
from __main__ import startTime
from __main__ import full_prefix
from __main__ import data_writer
import os
import glob
import sys
import hashlib
import pytz
import itertools
import time
import io
import logging
import traceback
from collections import OrderedDict
from datetime import datetime
from xattr import listxattr, getxattr
from multiprocessing.dummy import Pool as ThreadPool
_modName = __name__.split('_')[-2]
_modVers = '.'.join(list(__name__.split('_')[-1][1:]))
log = logging.getLogger(_modName)
def shasum(filename, filesize, block_size=65536):
if filesize <= hash_size_limit and filesize > 0:
sha256 = hashlib.sha256()
try:
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
sha256 = sha256.hexdigest()
except IOError:
sha256 = 'ERROR'
else:
sha256 = ''
return sha256
def md5sum(filename, filesize, block_size=65536):
if filesize <= hash_size_limit and filesize > 0:
md5 = hashlib.md5()
try:
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
md5.update(block)
md5 = md5.hexdigest()
except:
md5 = 'ERROR'
else:
md5 = ''
return md5
def xattr_get(fullpath, attr_name):
try:
list_attrs = listxattr(fullpath)
if len(list_attrs) > 0 and attr_name in list_attrs:
out = getxattr(fullpath, attr_name)
return out
else:
return ''
except:
return 'ERROR'
def handle_files(name):
global counter
counter+=1
if not quiet:
if debug:
sys.stdout.write('dirlist : INFO Wrote %d lines in %s | FileName: %s \033[K\r' % (counter,datetime.now(pytz.UTC)-startTime,name))
else:
sys.stdout.write('dirlist : INFO Wrote %d lines in %s \r' % (counter,datetime.now(pytz.UTC)-startTime))
sys.stdout.flush()
# get timestamps and metadata for each file
record = OrderedDict((h, '') for h in headers)
stat_data = stats2(os.path.join(root, name))
record.update(stat_data)
# get quarantine extended attribute for each file, if available
if stat_data['mode'] != "Other":
try:
quarantine = xattr_get(os.path.join(root, name),"com.apple.quarantine").split(';')[2]
except:
quarantine = xattr_get(os.path.join(root, name),"com.apple.quarantine")
record['quarantine'] = quarantine.replace('\\x20',' ')
# get wherefrom extended attribute for each file, if available
wherefrom = xattr_get(os.path.join(root, name),"com.apple.metadata:kMDItemWhereFroms")
if wherefrom != "" and wherefrom.startswith("bplist"):
record['wherefrom_1'] = wherefrom
else:
record['wherefrom_1'] = ['']
# if hash alg is specified 'none' at amtc runtime, do not hash files. else do sha256 and md5 as specified (sha256 is default at runtime, md5 is user-specified)
if "none" not in hash_alg and stat_data['mode'] == "Regular File":
if 'sha256' in hash_alg:
record['sha256'] = shasum(os.path.join(root, name),record['size'])
if 'md5' in hash_alg:
record['md5'] = md5sum(os.path.join(root, name),record['size'])
# output.write_entry(record.values())
return record
def filePooler(files):
file_data = filePool.map(handle_files, files)
return file_data
headers = ['mode','size','uid','gid','mtime','atime','ctime','btime','path','name','sha256','md5','quarantine','wherefrom_1','wherefrom_2','code_signatures']
output = data_writer(_modName, headers)
# if there are specific directories to recurse, recurse them.
if dirlist_include_dirs != ['']:
root_list = []
for i in dirlist_include_dirs:
root_list.append(os.path.join(inputdir, i))
root_list = list(itertools.chain.from_iterable([glob.glob(i) for i in root_list]))
# if there are no specific directories to recurse, recurse from the root of the inputdir. also write the stats data to
else:
root_list = glob.glob(inputdir)
record = OrderedDict((h, '') for h in headers)
stat_data = stats2(inputdir)
record.update(stat_data)
output.write_entry(record.values())
# by default (if no-defaults is NOT in exclusion flag) exclude the following directories
if 'no-defaults' not in dirlist_exclude_dirs:
if not forensic_mode:
default_exclude = [
'.fseventsd','.DocumentRevisions-V100','.Spotlight-V100',
'Users/*/Pictures', 'Users/*/Library/Application Support/AddressBook',
'Users/*/Calendar', 'Users/*/Library/Calendars',
'Users/*/Library/Preferences/com.apple.AddressBook.plist'
]
else:
default_exclude = ['.fseventsd','.DocumentRevisions-V100','.Spotlight-V100']
# if no-defaults is in the exclusion flag, remove no-defaults and use the user-provided exclusion list
else:
default_exclude = []
dirlist_exclude_dirs.remove('no-defaults')
# if there are specific directories to exclude, do not recurse them
if dirlist_exclude_dirs != ['']:
exclude_list = [os.path.join(inputdir, i).strip("/") for i in default_exclude + dirlist_exclude_dirs]
# if no specific directories are excluded, use default-list (created above)
else:
exclude_list = [os.path.join(inputdir, i).strip("/") for i in default_exclude]
# if NOT running with -f flag for forensic mode, exclude everything in /Volumes/* to prevent recursion of mounted volumes IN ADDITION to other exclusions.
if not forensic_mode:
exclude_list += [i for i in glob.glob(os.path.join(inputdir, 'Volumes/*'))]
exclude_list = multiglob(inputdir, exclude_list)
else:
exclude_list = multiglob('/', exclude_list)
log.debug("The following directories will be excluded from dirlist enumeration: {0}".format(exclude_list))
# determine which hashing algorithms to run
if type(hash_alg) is list:
hash_alg = [''.join([x.lower() for x in i]) for i in hash_alg]
elif type(hash_alg) is str:
hash_alg = [hash_alg]
counter=0
filePool = ThreadPool(4)
for i in root_list:
for root, dirs, files in os.walk(i, topdown=True):
# prune excluded directories and files to prevent further recursion into them
dirs[:] = [d for d in dirs if os.path.join(root,d) not in exclude_list]
files[:] = [f for f in files if os.path.join(root,f) not in exclude_list]
# do not recurse into bundles that end with any of the file extensions below UNLESS told to at amtc runtime
exc_bundles = ('.app', '.framework','.lproj','.plugin','.kext','.osax','.bundle','.driver','.wdgt')
if root.strip().endswith(exc_bundles) and not (os.path.basename(root)).startswith('.') and recurse_bundles == False:
dirs[:] = []
files[:] = []
if dirlist_no_multithreading:
file_data = [handle_files(file) for file in files]
else:
file_data = filePooler(files)
for record in file_data:
wf = record['wherefrom_1']
if wf != ['']:
try:
parsed_wf = read_stream_bplist(wf)
parsed_wf_utf8 = [str(a.encode('utf-8')) for a in parsed_wf if a != ""]
except:
pathname = os.path.join(record['path'],record['name'])
parsed_wf_utf8 = ['ERROR']
log.debug("Could not parse embedded binary plist for kMDItemWhereFroms data from file {0}. {1}".format(pathname,[traceback.format_exc()]))
if len(parsed_wf_utf8) > 0:
record['wherefrom_1'] = parsed_wf_utf8[0]
if len(parsed_wf_utf8) > 1:
record['wherefrom_2'] = parsed_wf_utf8[1]
else:
record['wherefrom_1'] = ''
else:
record['wherefrom_1'] = ''
output.write_entry(record.values())
# bundles that will be code-sig checked
check_signatures_bundles = ('.app','.kext','.osax')
for name in dirs:
counter+=1
if not quiet:
if debug:
sys.stdout.write('dirlist : INFO Wrote %d lines in %s | FileName: %s \033[K\r' % (counter,datetime.now(pytz.UTC)-startTime,name))
else:
sys.stdout.write('dirlist : INFO Wrote %d lines in %s \r' % (counter,datetime.now(pytz.UTC)-startTime))
sys.stdout.flush()
# get timestamps and metadata for each file
record = OrderedDict((h, '') for h in headers)
stat_data = stats2(os.path.join(root, name))
record.update(stat_data)
# directory is bundle that ends with either of the three extensions, check its code signatures
if no_code_signatures is False and name.endswith(check_signatures_bundles) and not name.startswith('.'): #meaning DO process code signatures
record['code_signatures'] = str(get_codesignatures(os.path.join(root, name)))
output.write_entry(record.values())
filePool.close()
filePool.join()
if not quiet:
sys.stdout.write('\n')
sys.stdout.flush()
|
[
"[email protected]"
] | |
88f8a932922b5c3e9e58329fb7b4253de9525866
|
a0cc68cc9b80e20e8596ac03ac32db3d7897f950
|
/Utilities/Ping.py
|
a3dc28950d2a2ff3a1c95ce9d18cd82f902fa99e
|
[] |
no_license
|
Brynkr/CamelBot
|
7d0939630ed2c0efe340184fda4fa5a9d6b51a4b
|
842ee3ff169ad9e1bde98ce9afadc478b5b35ace
|
refs/heads/master
| 2021-01-20T15:36:59.661255 | 2018-04-12T23:18:42 | 2018-04-12T23:18:42 | 63,467,991 | 4 | 4 | null | 2018-04-06T20:35:45 | 2016-07-16T05:57:42 |
Python
|
UTF-8
|
Python
| false | false | 489 |
py
|
#checks if a website is online
import discord
import os
import subprocess
async def ping(message, client):
hostname = message.content[5:]
# response = os.system("ping -n 1 " + hostname)
response = subprocess.call(["ping", "-n", "1", hostname], shell=False);
if response == 0:
msg = hostname + ' is up!'
else:
msg = hostname + ' is down!'
msg = "Oops this is broken at the moment, I'm sure I'll get around to fixing it some day.."
return msg
|
[
"[email protected]"
] | |
195477ca424eff418c9b7f3bdfde4b34a647dfb4
|
e6e4cba12d934f5622ec5d530a2e0b11585ae619
|
/scripts/misc.py
|
d8d7e71eea2dd21f34df009995baf637dc663738
|
[
"BSD-3-Clause"
] |
permissive
|
gregsalvesen/bhspinf
|
83343bc68b48d2f3b967d83930eca4e24ccd55e2
|
3dc054286e801ca518bc64e4a77fe5ca0bdf04ea
|
refs/heads/main
| 2023-01-08T20:17:16.933565 | 2020-11-19T19:04:36 | 2020-11-19T19:04:36 | 313,707,043 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,366 |
py
|
import numpy as np
'''
Miscellaneous functions.
'''
#====================================================================================================
# Collect the command line keyword arguments (nicked from Jordan Mirocha)
def get_cmd_line_kwargs(argv):
# Initialize the dictionary of command line keyword arguments
cmd_line_kwargs = {}
# Loops through the keyword arguments
for arg in argv[1:]:
# Split the argument into the variable name (pre) and its value (post)
#pre, post = arg.split('=')
pre = arg.split('=')[0]
post = arg.split(pre+'=')[-1]
# Need to do some type-casting
if post.isdigit():
cmd_line_kwargs[pre] = int(post)
elif post.isalpha():
cmd_line_kwargs[pre] = str(post)
elif post[0] == '[':
vals = post[1:-1].split(',')
#cmd_line_kwargs[pre] = np.array(map(float, vals))
cmd_line_kwargs[pre] = list(map(float, vals))
else:
try:
cmd_line_kwargs[pre] = float(post)
except ValueError:
# Strings with underscores will return False from isalpha()
cmd_line_kwargs[pre] = str(post)
return cmd_line_kwargs
#====================================================================================================
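# Example of the parsing above (illustrative only; the script name and keys are hypothetical):
#   python run.py niter=100 mode=fast bounds=[0.1,0.9]
#   -> {'niter': 100, 'mode': 'fast', 'bounds': [0.1, 0.9]}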
|
[
"[email protected]"
] | |
e18b0d3d437476da904df18390cea2ad2363d612
|
2b9397e9e26f7d97ce6983d36c9842ac773b70c6
|
/workforce/migrations/0009_auto_20181015_0646.py
|
c4953694528ecce12900a7fff2ae42803176183d
|
[] |
no_license
|
eakDev/aip-1
|
288ed7d7b8cf65c74b510f4f4e45292e3342796d
|
3db2520e3c246e25e2cfa62e395a3ba6ebe37252
|
refs/heads/main
| 2023-05-02T08:57:42.449727 | 2021-05-23T10:16:59 | 2021-05-23T10:16:59 | 386,578,482 | 1 | 0 | null | 2021-07-16T09:15:22 | 2021-07-16T09:15:22 | null |
UTF-8
|
Python
| false | false | 509 |
py
|
# Generated by Django 2.1.1 on 2018-10-15 06:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('workforce', '0008_projectsite'),
]
operations = [
migrations.AlterField(
model_name='employeeprofile',
name='project_site',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='site', to='workforce.ProjectSite'),
),
]
|
[
"[email protected]"
] | |
a1d0e502971ecdca7301cb7793a6314c0056fe5f
|
219b8dbe9f024055e38b81ec383a2723549759e7
|
/vocprez/pyldapi/renderer_container.py
|
a28a2bf1f2de15fe4c4b932d69dd8347576090f5
|
[] |
no_license
|
BritishGeologicalSurvey/VocPrez3
|
9e813d2f412ee5d3d9d12865bced5d6c00de77c2
|
647b8f7edc618c51ef00f9cfc9d727bd0453a2ce
|
refs/heads/main
| 2023-08-23T09:58:28.651424 | 2021-11-08T21:35:57 | 2021-11-08T21:35:57 | 424,207,349 | 0 | 1 | null | 2021-11-03T12:04:58 | 2021-11-03T12:04:57 | null |
UTF-8
|
Python
| false | false | 2,023 |
py
|
from .renderer import Renderer
from .profile import Profile
from .data import RDF_MEDIATYPES
class ContainerRenderer(Renderer):
def __init__(self,
request,
instance_uri,
profiles=None,
default_profile_token="mem",
**kwargs
):
contanno = Profile(
uri="https://w3id.org/profile/contanno",
id="contanno",
label="Container Annotations",
            comment="Describes container annotations only, that is a view of a container object's properties"
" other than its members.",
mediatypes=["text/html"] + RDF_MEDIATYPES,
default_mediatype="text/html",
languages=["en"],
default_language="en",
)
mem = Profile(
uri="https://w3id.org/profile/mem", # the ConnegP URI for Alt Rep Data Model
id="mem",
label="Members",
comment="A very basic data model that lists the members of container objects only, i.e. not their other "
"properties",
mediatypes=["text/html", "application/json"] + RDF_MEDIATYPES,
default_mediatype="text/html",
languages=["en"],
default_language="en",
)
new_profiles = {
"contanno": contanno,
"mem": mem,
}
if profiles is not None:
new_profiles.update(profiles)
super().__init__(
request,
str(request.url).split("?")[0],
new_profiles,
default_profile_token,
)
def render(self):
alt = super().render()
if alt is not None:
return alt
elif self.profile == "mem":
raise NotImplementedError("You must implement handling for the mem profile")
elif self.profile == "contanno":
raise NotImplementedError("You must implement handling for the contanno profile")
|
[
"[email protected]"
] | |
adf8d05e846cad76e00b677b7883a4fac3d451a7
|
2c5ce6de8fbdb16b42c4005e3a49fc9450e3e8c3
|
/src/sec03/21.py
|
8577c926335f0b33dbb377e096a27859757683eb
|
[] |
no_license
|
takuya0412/100-NLP-knocks
|
2ffb549fc8ed4be881e47b6d62e045cff93997c5
|
038f956e1e2f332550ac999aec06cb4f80549b33
|
refs/heads/master
| 2020-09-21T22:26:28.384105 | 2020-01-14T14:29:30 | 2020-01-14T14:29:30 | 224,954,313 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
py
|
import re
def extract_category_line(path):
with open(path, 'r') as f:
text = f.readline()
all_text = ""
while text:
all_text += text
text = f.readline()
ptn_a = re.compile(r"Category")
|
[
"[email protected]"
] | |
497c4c10fcc4b3f6ac6aa3c3828af57d83cc92b3
|
85b892b5cbf5b51b20c0b0c498857ee26221603e
|
/poll/migrations/0001_initial.py
|
b484df089c245b30b71e04429f62be404e3b24f5
|
[] |
no_license
|
muhammadahmadazhar/nested_serializer_hardic_patel
|
aa41affabd749892cbe6fb961eed7d0ee3fae45b
|
a07a9c6c36a1bc342d20a98373aec88ecf8eb54d
|
refs/heads/master
| 2023-06-01T14:08:20.792660 | 2021-06-22T12:52:47 | 2021-06-22T12:52:47 | 379,268,111 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,471 |
py
|
# Generated by Django 3.2.4 on 2021-06-18 07:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=50)),
],
options={
'ordering': [],
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.TextField()),
('status', models.CharField(default='inactive', max_length=10)),
('start_date', models.DateTimeField(blank=True, null=True)),
('end_date', models.DateTimeField(blank=True, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('tags', models.ManyToManyField(to='poll.Tag')),
],
options={
'ordering': ('-created_at',),
'abstract': False,
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.question')),
],
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('choice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.choice')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"[email protected]"
] | |
08ea484a417483964f1062a3a74b9ad9c8d66566
|
33a0d93fe4fdc6835a86cc6b3eea5f7596348e8a
|
/twinkle/migrations/0001_initial.py
|
42deb6c82e703a566a5a8baa4c08b2ede3a2cb39
|
[] |
no_license
|
VanWade/Starry
|
3c8de534f61352ab4df8e27df349bd8ed9c8e1aa
|
80f9147bc143bc2a962fb360974c65a4a73f15fa
|
refs/heads/master
| 2020-04-02T05:58:35.612746 | 2016-06-22T09:53:53 | 2016-06-22T09:53:53 | 61,311,327 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,296 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Twinkle',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique_for_date=b'publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(default=b'draft', max_length=10, choices=[(b'draft', b'Draft'), (b'published', b'Published')])),
('author', models.ForeignKey(related_name='twinkles_user', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-publish',),
},
),
]
|
[
"[email protected]"
] | |
18039e8eb5a3683b6140b934c45af8e2c5ba5d72
|
8d06e935b26f8f305055ee52acf040c46676c540
|
/components/model_download.py
|
306f67f9fdbd196831cd51a9a72cfeed4670f86d
|
[] |
no_license
|
PascalSchroederDE/kf-sample-pipeline
|
58ba7c544e9ed52b369a2752b740dde393854eca
|
e11239c9ffa887aa329b76b72cf2220b58b83076
|
refs/heads/master
| 2020-07-19T05:46:34.499039 | 2019-09-11T19:13:23 | 2019-09-11T19:13:23 | 206,385,572 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,312 |
py
|
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import argparse
import logging
def write_file(path, content):
f = open(path, "w")
f.write(content)
f.close()
def get_activation_func(shorthand):
    return {
        "relu": tf.nn.relu,
"softmax": tf.nn.softmax
}[shorthand]
def load_data(path):
return pd.read_csv(path)
def download_model(input_shape):
return tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
weights='imagenet')
def main():
parser = argparse.ArgumentParser(description="Feature engineering")
parser.add_argument('--input_shape_height', type=int, help='Heigt of input images')
parser.add_argument('--input_shape_width', type=int, help='Width of input images')
parser.add_argument('--output', type=str, help='Output location for model to be build')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
logging.info("Downloading model...")
model = download_model((args.input_shape_height, args.input_shape_width))
logging.info("Saving model...")
model.save(args.output)
write_file("/model.txt", args.output)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
3d7ccbc3de07a241689ab3b9d7dea466d243fcb4
|
f9b4d965b7b0065f8254b27899487fb2125691e5
|
/cats_dogs_model_depth.py
|
6528ae47ec552d85738dc744b9a3545a862b706f
|
[] |
no_license
|
zhihanyang2022/cnn-experiments
|
22bb545409fcd0933bccbe8fbfbace6cee2ec566
|
0e6b725426922cbebad98bc1a44f7c69971dc432
|
refs/heads/master
| 2020-04-23T05:26:06.093569 | 2019-03-10T18:24:09 | 2019-03-10T18:24:09 | 170,939,953 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,516 |
py
|
# <codecell>
from keras import models
from keras import layers
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
# <codecell>
import os
original_dataset_dir_cat = '/Users/yangzhihan/Desktop/PetImages/cat'
original_dataset_dir_dog = '/Users/yangzhihan/Desktop/PetImages/dog'
base_dir = '/Users/yangzhihan/My Files/Academic Interests/Computer Science/0 Datasets/cats_and_dogs_dataset'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
test_cats_dir = os.path.join(test_dir, 'cats')
test_dogs_dir = os.path.join(test_dir, 'dogs')
# <codecell>
train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
# Notes on Python generators (relevant to the data generators above; a tiny illustration follows):
# - `yield` produces values lazily; `next()` pulls the next value
# - generators support for loops, which makes the consuming code much more readable
# - generator comprehensions use (), and list() converts a generator to a list
# - generators are good for performance (values are not all held in memory at once)
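# Tiny illustration of the notes above (added for clarity; not from the original notebook):
# def count_up(n):
#     for i in range(n):
#         yield i               # produces one value at a time
# gen = count_up(3); next(gen)  # -> 0
# list(count_up(3))             # -> [0, 1, 2]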
# <codecell>
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=0.0001),
metrics=['accuracy'])
model.fit_generator(train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50)
# <codecell>
os.chdir("/Users/yangzhihan/Desktop")
model.save('cats_and_dogs_small_1.h5')
# <codecell>
os.getcwd()
# <codecell>
model = models.load_model('/Users/yangzhihan/Desktop/cats_and_dogs_small_1.h5')
#
|
[
"[email protected]"
] | |
84bb1787d85ee6b5c79f1bc7db24b537c56ebcfa
|
1eb43e05ddc74f6fe7d0aeaf2a6637934ae677fb
|
/snippets/migrations/0001_initial.py
|
3d6a1e5cd90819c03cbb37bdab461709d97ba226
|
[] |
no_license
|
aaa006bd/djangorestdemo
|
1998a1945398a2224c79bf9bf7a6853903f6ee2f
|
c3d2d61e6afe91419750c4f09bcd365a50f15e57
|
refs/heads/master
| 2020-03-31T08:38:38.620605 | 2018-10-08T11:14:35 | 2018-10-08T11:14:35 | 152,066,209 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,276 |
py
|
# Generated by Django 2.1.2 on 2018-10-08 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Snippet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, default='', max_length=100)),
('code', models.TextField()),
('linenos', models.BooleanField(default=False)),
('language', models.CharField(choices=[('abap', 'ABAP'), ('abnf', 'ABNF'), ('ada', 'Ada'), ('adl', 'ADL'), ('agda', 'Agda'), ('aheui', 'Aheui'), ('ahk', 'autohotkey'), ('alloy', 'Alloy'), ('ampl', 'Ampl'), ('antlr', 'ANTLR'), ('antlr-as', 'ANTLR With ActionScript Target'), ('antlr-cpp', 'ANTLR With CPP Target'), ('antlr-csharp', 'ANTLR With C# Target'), ('antlr-java', 'ANTLR With Java Target'), ('antlr-objc', 'ANTLR With ObjectiveC Target'), ('antlr-perl', 'ANTLR With Perl Target'), ('antlr-python', 'ANTLR With Python Target'), ('antlr-ruby', 'ANTLR With Ruby Target'), ('apacheconf', 'ApacheConf'), ('apl', 'APL'), ('applescript', 'AppleScript'), ('arduino', 'Arduino'), ('as', 'ActionScript'), ('as3', 'ActionScript 3'), ('aspectj', 'AspectJ'), ('aspx-cs', 'aspx-cs'), ('aspx-vb', 'aspx-vb'), ('asy', 'Asymptote'), ('at', 'AmbientTalk'), ('autoit', 'AutoIt'), ('awk', 'Awk'), ('basemake', 'Base Makefile'), ('bash', 'Bash'), ('bat', 'Batchfile'), ('bbcode', 'BBCode'), ('bc', 'BC'), ('befunge', 'Befunge'), ('bib', 'BibTeX'), ('blitzbasic', 'BlitzBasic'), ('blitzmax', 'BlitzMax'), ('bnf', 'BNF'), ('boo', 'Boo'), ('boogie', 'Boogie'), ('brainfuck', 'Brainfuck'), ('bro', 'Bro'), ('bst', 'BST'), ('bugs', 'BUGS'), ('c', 'C'), ('c-objdump', 'c-objdump'), ('ca65', 'ca65 assembler'), ('cadl', 'cADL'), ('camkes', 'CAmkES'), ('capdl', 'CapDL'), ('capnp', "Cap'n Proto"), ('cbmbas', 'CBM BASIC V2'), ('ceylon', 'Ceylon'), ('cfc', 'Coldfusion CFC'), ('cfengine3', 'CFEngine3'), ('cfm', 'Coldfusion HTML'), ('cfs', 'cfstatement'), ('chai', 'ChaiScript'), ('chapel', 'Chapel'), ('cheetah', 'Cheetah'), ('cirru', 'Cirru'), ('clay', 'Clay'), ('clean', 'Clean'), ('clojure', 'Clojure'), ('clojurescript', 'ClojureScript'), ('cmake', 'CMake'), ('cobol', 'COBOL'), ('cobolfree', 'COBOLFree'), ('coffee-script', 'CoffeeScript'), ('common-lisp', 'Common Lisp'), ('componentpascal', 'Component Pascal'), ('console', 'Bash Session'), ('control', 'Debian Control file'), ('coq', 'Coq'), ('cpp', 'C++'), ('cpp-objdump', 'cpp-objdump'), ('cpsa', 'CPSA'), ('cr', 'Crystal'), ('crmsh', 'Crmsh'), ('croc', 'Croc'), ('cryptol', 'Cryptol'), ('csharp', 'C#'), ('csound', 'Csound Orchestra'), ('csound-document', 'Csound Document'), ('csound-score', 'Csound Score'), ('css', 'CSS'), ('css+django', 'CSS+Django/Jinja'), ('css+erb', 'CSS+Ruby'), ('css+genshitext', 'CSS+Genshi Text'), ('css+lasso', 'CSS+Lasso'), ('css+mako', 'CSS+Mako'), ('css+mozpreproc', 'CSS+mozpreproc'), ('css+myghty', 'CSS+Myghty'), ('css+php', 'CSS+PHP'), ('css+smarty', 'CSS+Smarty'), ('cucumber', 'Gherkin'), ('cuda', 'CUDA'), ('cypher', 'Cypher'), ('cython', 'Cython'), ('d', 'D'), ('d-objdump', 'd-objdump'), ('dart', 'Dart'), ('delphi', 'Delphi'), ('dg', 'dg'), ('diff', 'Diff'), ('django', 'Django/Jinja'), ('docker', 'Docker'), ('doscon', 'MSDOS Session'), ('dpatch', 'Darcs Patch'), ('dtd', 'DTD'), ('duel', 'Duel'), ('dylan', 'Dylan'), ('dylan-console', 'Dylan session'), ('dylan-lid', 'DylanLID'), ('earl-grey', 'Earl Grey'), ('easytrieve', 'Easytrieve'), ('ebnf', 'EBNF'), ('ec', 'eC'), ('ecl', 'ECL'), ('eiffel', 'Eiffel'), ('elixir', 'Elixir'), ('elm', 'Elm'), ('emacs', 'EmacsLisp'), ('erb', 'ERB'), ('erl', 'Erlang erl session'), ('erlang', 'Erlang'), ('evoque', 'Evoque'), ('extempore', 'xtlang'), ('ezhil', 'Ezhil'), ('factor', 'Factor'), ('fan', 'Fantom'), ('fancy', 'Fancy'), ('felix', 'Felix'), ('fish', 'Fish'), ('flatline', 'Flatline'), ('forth', 'Forth'), ('fortran', 'Fortran'), ('fortranfixed', 'FortranFixed'), ('foxpro', 'FoxPro'), ('fsharp', 'FSharp'), ('gap', 'GAP'), 
('gas', 'GAS'), ('genshi', 'Genshi'), ('genshitext', 'Genshi Text'), ('glsl', 'GLSL'), ('gnuplot', 'Gnuplot'), ('go', 'Go'), ('golo', 'Golo'), ('gooddata-cl', 'GoodData-CL'), ('gosu', 'Gosu'), ('groff', 'Groff'), ('groovy', 'Groovy'), ('gst', 'Gosu Template'), ('haml', 'Haml'), ('handlebars', 'Handlebars'), ('haskell', 'Haskell'), ('haxeml', 'Hxml'), ('hexdump', 'Hexdump'), ('hsail', 'HSAIL'), ('html', 'HTML'), ('html+cheetah', 'HTML+Cheetah'), ('html+django', 'HTML+Django/Jinja'), ('html+evoque', 'HTML+Evoque'), ('html+genshi', 'HTML+Genshi'), ('html+handlebars', 'HTML+Handlebars'), ('html+lasso', 'HTML+Lasso'), ('html+mako', 'HTML+Mako'), ('html+myghty', 'HTML+Myghty'), ('html+ng2', 'HTML + Angular2'), ('html+php', 'HTML+PHP'), ('html+smarty', 'HTML+Smarty'), ('html+twig', 'HTML+Twig'), ('html+velocity', 'HTML+Velocity'), ('http', 'HTTP'), ('hx', 'Haxe'), ('hybris', 'Hybris'), ('hylang', 'Hy'), ('i6t', 'Inform 6 template'), ('idl', 'IDL'), ('idris', 'Idris'), ('iex', 'Elixir iex session'), ('igor', 'Igor'), ('inform6', 'Inform 6'), ('inform7', 'Inform 7'), ('ini', 'INI'), ('io', 'Io'), ('ioke', 'Ioke'), ('irc', 'IRC logs'), ('isabelle', 'Isabelle'), ('j', 'J'), ('jags', 'JAGS'), ('jasmin', 'Jasmin'), ('java', 'Java'), ('javascript+mozpreproc', 'Javascript+mozpreproc'), ('jcl', 'JCL'), ('jlcon', 'Julia console'), ('js', 'JavaScript'), ('js+cheetah', 'JavaScript+Cheetah'), ('js+django', 'JavaScript+Django/Jinja'), ('js+erb', 'JavaScript+Ruby'), ('js+genshitext', 'JavaScript+Genshi Text'), ('js+lasso', 'JavaScript+Lasso'), ('js+mako', 'JavaScript+Mako'), ('js+myghty', 'JavaScript+Myghty'), ('js+php', 'JavaScript+PHP'), ('js+smarty', 'JavaScript+Smarty'), ('jsgf', 'JSGF'), ('json', 'JSON'), ('json-object', 'JSONBareObject'), ('jsonld', 'JSON-LD'), ('jsp', 'Java Server Page'), ('julia', 'Julia'), ('juttle', 'Juttle'), ('kal', 'Kal'), ('kconfig', 'Kconfig'), ('koka', 'Koka'), ('kotlin', 'Kotlin'), ('lagda', 'Literate Agda'), ('lasso', 'Lasso'), ('lcry', 'Literate Cryptol'), ('lean', 'Lean'), ('less', 'LessCss'), ('lhs', 'Literate Haskell'), ('lidr', 'Literate Idris'), ('lighty', 'Lighttpd configuration file'), ('limbo', 'Limbo'), ('liquid', 'liquid'), ('live-script', 'LiveScript'), ('llvm', 'LLVM'), ('logos', 'Logos'), ('logtalk', 'Logtalk'), ('lsl', 'LSL'), ('lua', 'Lua'), ('make', 'Makefile'), ('mako', 'Mako'), ('maql', 'MAQL'), ('mask', 'Mask'), ('mason', 'Mason'), ('mathematica', 'Mathematica'), ('matlab', 'Matlab'), ('matlabsession', 'Matlab session'), ('md', 'markdown'), ('minid', 'MiniD'), ('modelica', 'Modelica'), ('modula2', 'Modula-2'), ('monkey', 'Monkey'), ('monte', 'Monte'), ('moocode', 'MOOCode'), ('moon', 'MoonScript'), ('mozhashpreproc', 'mozhashpreproc'), ('mozpercentpreproc', 'mozpercentpreproc'), ('mql', 'MQL'), ('mscgen', 'Mscgen'), ('mupad', 'MuPAD'), ('mxml', 'MXML'), ('myghty', 'Myghty'), ('mysql', 'MySQL'), ('nasm', 'NASM'), ('ncl', 'NCL'), ('nemerle', 'Nemerle'), ('nesc', 'nesC'), ('newlisp', 'NewLisp'), ('newspeak', 'Newspeak'), ('ng2', 'Angular2'), ('nginx', 'Nginx configuration file'), ('nim', 'Nimrod'), ('nit', 'Nit'), ('nixos', 'Nix'), ('nsis', 'NSIS'), ('numpy', 'NumPy'), ('nusmv', 'NuSMV'), ('objdump', 'objdump'), ('objdump-nasm', 'objdump-nasm'), ('objective-c', 'Objective-C'), ('objective-c++', 'Objective-C++'), ('objective-j', 'Objective-J'), ('ocaml', 'OCaml'), ('octave', 'Octave'), ('odin', 'ODIN'), ('ooc', 'Ooc'), ('opa', 'Opa'), ('openedge', 'OpenEdge ABL'), ('pacmanconf', 'PacmanConf'), ('pan', 'Pan'), ('parasail', 'ParaSail'), ('pawn', 'Pawn'), ('perl', 
'Perl'), ('perl6', 'Perl6'), ('php', 'PHP'), ('pig', 'Pig'), ('pike', 'Pike'), ('pkgconfig', 'PkgConfig'), ('plpgsql', 'PL/pgSQL'), ('postgresql', 'PostgreSQL SQL dialect'), ('postscript', 'PostScript'), ('pot', 'Gettext Catalog'), ('pov', 'POVRay'), ('powershell', 'PowerShell'), ('praat', 'Praat'), ('prolog', 'Prolog'), ('properties', 'Properties'), ('protobuf', 'Protocol Buffer'), ('ps1con', 'PowerShell Session'), ('psql', 'PostgreSQL console (psql)'), ('pug', 'Pug'), ('puppet', 'Puppet'), ('py3tb', 'Python 3.0 Traceback'), ('pycon', 'Python console session'), ('pypylog', 'PyPy Log'), ('pytb', 'Python Traceback'), ('python', 'Python'), ('python3', 'Python 3'), ('qbasic', 'QBasic'), ('qml', 'QML'), ('qvto', 'QVTO'), ('racket', 'Racket'), ('ragel', 'Ragel'), ('ragel-c', 'Ragel in C Host'), ('ragel-cpp', 'Ragel in CPP Host'), ('ragel-d', 'Ragel in D Host'), ('ragel-em', 'Embedded Ragel'), ('ragel-java', 'Ragel in Java Host'), ('ragel-objc', 'Ragel in Objective C Host'), ('ragel-ruby', 'Ragel in Ruby Host'), ('raw', 'Raw token data'), ('rb', 'Ruby'), ('rbcon', 'Ruby irb session'), ('rconsole', 'RConsole'), ('rd', 'Rd'), ('rebol', 'REBOL'), ('red', 'Red'), ('redcode', 'Redcode'), ('registry', 'reg'), ('resource', 'ResourceBundle'), ('rexx', 'Rexx'), ('rhtml', 'RHTML'), ('rnc', 'Relax-NG Compact'), ('roboconf-graph', 'Roboconf Graph'), ('roboconf-instances', 'Roboconf Instances'), ('robotframework', 'RobotFramework'), ('rql', 'RQL'), ('rsl', 'RSL'), ('rst', 'reStructuredText'), ('rts', 'TrafficScript'), ('rust', 'Rust'), ('sas', 'SAS'), ('sass', 'Sass'), ('sc', 'SuperCollider'), ('scala', 'Scala'), ('scaml', 'Scaml'), ('scheme', 'Scheme'), ('scilab', 'Scilab'), ('scss', 'SCSS'), ('shen', 'Shen'), ('silver', 'Silver'), ('slim', 'Slim'), ('smali', 'Smali'), ('smalltalk', 'Smalltalk'), ('smarty', 'Smarty'), ('sml', 'Standard ML'), ('snobol', 'Snobol'), ('snowball', 'Snowball'), ('sourceslist', 'Debian Sourcelist'), ('sp', 'SourcePawn'), ('sparql', 'SPARQL'), ('spec', 'RPMSpec'), ('splus', 'S'), ('sql', 'SQL'), ('sqlite3', 'sqlite3con'), ('squidconf', 'SquidConf'), ('ssp', 'Scalate Server Page'), ('stan', 'Stan'), ('stata', 'Stata'), ('swift', 'Swift'), ('swig', 'SWIG'), ('systemverilog', 'systemverilog'), ('tads3', 'TADS 3'), ('tap', 'TAP'), ('tasm', 'TASM'), ('tcl', 'Tcl'), ('tcsh', 'Tcsh'), ('tcshcon', 'Tcsh Session'), ('tea', 'Tea'), ('termcap', 'Termcap'), ('terminfo', 'Terminfo'), ('terraform', 'Terraform'), ('tex', 'TeX'), ('text', 'Text only'), ('thrift', 'Thrift'), ('todotxt', 'Todotxt'), ('trac-wiki', 'MoinMoin/Trac Wiki markup'), ('treetop', 'Treetop'), ('ts', 'TypeScript'), ('tsql', 'Transact-SQL'), ('turtle', 'Turtle'), ('twig', 'Twig'), ('typoscript', 'TypoScript'), ('typoscriptcssdata', 'TypoScriptCssData'), ('typoscripthtmldata', 'TypoScriptHtmlData'), ('urbiscript', 'UrbiScript'), ('vala', 'Vala'), ('vb.net', 'VB.net'), ('vcl', 'VCL'), ('vclsnippets', 'VCLSnippets'), ('vctreestatus', 'VCTreeStatus'), ('velocity', 'Velocity'), ('verilog', 'verilog'), ('vgl', 'VGL'), ('vhdl', 'vhdl'), ('vim', 'VimL'), ('wdiff', 'WDiff'), ('whiley', 'Whiley'), ('x10', 'X10'), ('xml', 'XML'), ('xml+cheetah', 'XML+Cheetah'), ('xml+django', 'XML+Django/Jinja'), ('xml+erb', 'XML+Ruby'), ('xml+evoque', 'XML+Evoque'), ('xml+lasso', 'XML+Lasso'), ('xml+mako', 'XML+Mako'), ('xml+myghty', 'XML+Myghty'), ('xml+php', 'XML+PHP'), ('xml+smarty', 'XML+Smarty'), ('xml+velocity', 'XML+Velocity'), ('xquery', 'XQuery'), ('xslt', 'XSLT'), ('xtend', 'Xtend'), ('xul+mozpreproc', 'XUL+mozpreproc'), ('yaml', 'YAML'), 
('yaml+jinja', 'YAML+Jinja'), ('zephir', 'Zephir')], default='python', max_length=100)),
('style', models.CharField(choices=[('abap', 'abap'), ('algol', 'algol'), ('algol_nu', 'algol_nu'), ('arduino', 'arduino'), ('autumn', 'autumn'), ('borland', 'borland'), ('bw', 'bw'), ('colorful', 'colorful'), ('default', 'default'), ('emacs', 'emacs'), ('friendly', 'friendly'), ('fruity', 'fruity'), ('igor', 'igor'), ('lovelace', 'lovelace'), ('manni', 'manni'), ('monokai', 'monokai'), ('murphy', 'murphy'), ('native', 'native'), ('paraiso-dark', 'paraiso-dark'), ('paraiso-light', 'paraiso-light'), ('pastie', 'pastie'), ('perldoc', 'perldoc'), ('rainbow_dash', 'rainbow_dash'), ('rrt', 'rrt'), ('tango', 'tango'), ('trac', 'trac'), ('vim', 'vim'), ('vs', 'vs'), ('xcode', 'xcode')], default='friendly', max_length=100)),
],
options={
'ordering': ('created',),
},
),
]
|
[
"[email protected]"
] | |
028a8b43ea0cc9b026df14de46103f4b9e5f1ec5
|
5f0a89d93ec8f5b9deeec768b0c8fb59def93791
|
/pjt/settings.py
|
e959aa27c12570bef56aad9da25ccf78a578d26f
|
[] |
no_license
|
airpong/moobytrap
|
c339beb0ddc9bbdf72e7fd25f92b63f8537fad02
|
d621374712fab5b1f3e15ef684baaebbdab36f68
|
refs/heads/master
| 2022-12-11T08:32:10.876259 | 2019-06-27T08:21:29 | 2019-06-27T08:21:29 | 194,054,043 | 0 | 0 | null | 2022-12-08T05:17:26 | 2019-06-27T08:22:58 |
HTML
|
UTF-8
|
Python
| false | false | 3,556 |
py
|
"""
Django settings for pjt project.
Generated by 'django-admin startproject' using Django 2.1.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
from decouple import config
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
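# Added note (not in the original settings): decouple reads SECRET_KEY from the environment or a
# .env file, e.g. a .env line such as  SECRET_KEY=replace-me-with-a-long-random-string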
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework_swagger',
'rest_framework',
'bootstrap4',
'accounts',
'movie',
'requests',
'bs4',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pjt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'pjt','templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pjt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
AUTH_USER_MODEL = 'accounts.User'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
CORS_ORIGIN_ALLOW_ALL = True
# Heroku
import django_heroku
django_heroku.settings(locals())
|
[
"[email protected]"
] | |
8c2dda69a73d2b99c961cae0afbe05e88d8dff0a
|
e22807c554060f78d9333634ac9785687e38d794
|
/get-commit-gitname/getGitName.py
|
0a78dc249db8f08f853c1d24bfd30a7d789032cf
|
[
"Apache-2.0"
] |
permissive
|
firebird519/python
|
bfb74766977d042d825d7afc3db2d7db7ba8b6b4
|
d61b0d4e7c9d7fa36cd6d6bdd7e82c7e5de5fed1
|
refs/heads/master
| 2021-07-18T11:15:47.518327 | 2017-10-20T03:17:20 | 2017-10-20T03:17:20 | 107,625,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,043 |
py
|
#!/usr/bin/env python
# python version: 2.7.10
import os
import sys,urllib,string,re
import urllib2
import webbrowser
DEBUG = 1
URL_COMMIT_DETAILS_PRE = 'http://review.sonyericsson.net/changes/'
URL_END = 'detail?O=404'
GIT_NAME_KEY = 'project'
#browserCookie.py is necessary for this import.
try:
from browserCookie import *
except ImportError, e:
print e
    print 'Please make sure the following py file exists: "browserCookie.py"'
exit('')
def log(text):
if DEBUG:
print text
# file handler is returned by this function
def getNetJsonData(url):
#print url
#print 'Using firefox cookie to open this url:{!s}'.format(url)
cj = firefox()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0');
req.add_header('Accept', 'application/json');
req.add_header('Host', 'review.sonyericsson.net');
req.add_header('Accept-Language', 'aen-US,en;q=0.5');
req.add_header('Accept-Encoding', 'deflate');
req.add_header('Content-Type', 'application/json; charset=UTF-8');
req.add_header('x-content-type-options', 'nosniff');
fHandler = opener.open(req)
return fHandler
def getGitName(gerritNumber):
# no gerrit number inputted
if len(gerritNumber) <= 0:
return ''
gerritNumber = gerritNumber.strip('\n').strip()
#print gerritNumber
url = URL_COMMIT_DETAILS_PRE + gerritNumber + '/' + URL_END
#print 'url:',url
commitData = getNetJsonData(url)
gitName = ''
#print commitData.readlines()
while 1:
line = commitData.readline()
if len(line)==0:
break
#print line
if len(gitName) == 0:
gitName = getKeyValue(line, GIT_NAME_KEY)
if len(gitName) > 0:
break;
if len(gitName) == 0:
return ''
return gitName
# an empty string is returned if the key is not found.
# currently cannot parse a ',' that appears between double quotes
def getKeyValue(data, key):
#print data
#print key
keyIndex = data.find(key)
if keyIndex < 0:
return ''
subString = data[keyIndex:]
log(key + ':' + subString[:20])
keyValueEndIndex = subString.find(',')
sepIndex = subString.find(':')
value = subString[sepIndex + 1: keyValueEndIndex]
value = value.strip('"')
value = value.strip()
log(key + ":" + value)
return value
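# Added usage note (not in the original script): for a response fragment such as
#   '{"project":"platform/frameworks/base","branch":"master"}'
# getKeyValue(data, 'project') returns 'platform/frameworks/base'.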
# ---------------------------------------
print 'start'
try:
filehandle = open('origin.csv', 'r')
except IOError, e:
print 'file open error. exit!'
sys.exit(1)
excelFileHandler = open('result.csv', 'w')
excelFileHandler.write('PID,Title,Link,owner,Comments\n')
while 1:
line = filehandle.readline()
if len(line) == 0:
break;
line = line.strip('\n').strip()
#print line
sepIndex = line.rfind('/')
gitName = ''
if sepIndex >= 0:
gerritNumber = line[sepIndex + 1:]
gitName = getGitName(gerritNumber)
if len(gitName) == 0:
print 'get gitname failed. line data:',line
else:
print '.',
sys.stdout.flush()
ret = line + ',' + gitName + '\n'
excelFileHandler.write(ret)
filehandle.close()
excelFileHandler.close()
print 'End'
sys.exit(1)
|
[
"[email protected]"
] | |
ba426015d2b9fb7fd0c09c03f4b14631d1f0c9fa
|
8debe4980eb8cbf9ca2db8c38f4ffe594741af30
|
/build/ROS-Academy-for-Beginners/tf_demo/catkin_generated/pkg.installspace.context.pc.py
|
0b8e25b0f071410ed37dec39bcf624162a615a72
|
[] |
no_license
|
wisecube-cn/catkin_ws
|
ed2c411039b717bef7cd9d8d9394746fe5c44560
|
f5b1c792de81f8fcbac582de5a776769501b4f40
|
refs/heads/master
| 2020-03-28T10:53:13.010085 | 2018-09-10T13:00:01 | 2018-09-10T13:00:01 | 148,155,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 364 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tf_demo"
PROJECT_SPACE_DIR = "/root/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"[email protected]"
] | |
3e4e138bff4e7f4cc11b96018d88814ab0abc472
|
3ed3eea7f7990fe2e0fbc62ea0e9d22d6b362e27
|
/simple-transfer.py
|
8b97f7a2a01be3c73c4744aadd0928db9e270fd2
|
[] |
no_license
|
univ-ai/TransferLearning-ai1-fall2019
|
1bb93ab27bba2e348fc757d20db7e0ee12fda9d5
|
d5a6317aa30eff3f98b288ab2dae272ad4f5852e
|
refs/heads/master
| 2020-08-03T02:29:17.743266 | 2020-01-23T10:42:06 | 2020-01-23T10:42:06 | 211,597,858 | 1 | 36 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,011 |
py
|
import os
import sys
import glob
import argparse
from keras import __version__
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.models import Sequential
from keras.layers import Dense, Flatten
from myimage import ImageDataGenerator
from keras.optimizers import SGD, RMSprop
from dogcat_data import generators, get_nb_files
import wandb
from wandb.keras import WandbCallback
run = wandb.init()
config = run.config
config.img_width = 150
config.img_height = 150
config.epochs = 30
config.batch_size = 32
train_dir = "../dogcat-data/train"
val_dir = "../dogcat-data/validation"
nb_train_samples = get_nb_files(train_dir)
nb_classes = len(glob.glob(train_dir + "/*"))
nb_val_samples = get_nb_files(val_dir)
# data prep
train_generator, validation_generator = generators(
preprocess_input, config.img_width, config.img_height, config.batch_size)
# setup model
conv_base = VGG16(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
model = Sequential()
model.add(conv_base)
model.add(Flatten(input_shape=conv_base.output_shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
print('This is the number of trainable weights '
'before freezing the conv base:', len(model.trainable_weights))
conv_base.trainable = False
print('This is the number of trainable weights '
'after freezing the conv base:', len(model.trainable_weights))
# transfer learning
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=2e-5),
metrics=['acc'])
train_generator, validation_generator = generators(
preprocess_input, config.img_width, config.img_height, config.batch_size)
model.fit_generator(
train_generator,
epochs=config.epochs,
steps_per_epoch=nb_train_samples // config.batch_size,
validation_data=validation_generator,
validation_steps=nb_val_samples // config.batch_size,
callbacks=[WandbCallback()])
model.save('transfered_simple.h5')
|
[
"[email protected]"
] | |
2164f1bf87f10973960f9fd7b6d9ba2cdcc3cdf6
|
3a4ede640e0fe40eb92618eaf4523298ba57c526
|
/wlcsim/mc/sphere_insertion.py
|
c0aa743f387dc8e74d2640c07a400a07ec1c8f1f
|
[
"MIT"
] |
permissive
|
riscalab/wlcsim
|
ae716ca0421e55ff7dc39ced823a2ff0ac1f5633
|
64236a7c9a378f1d70126ada6c8b18c2fa936c24
|
refs/heads/master
| 2023-05-26T03:34:59.449278 | 2023-05-13T21:41:18 | 2023-05-13T21:41:18 | 211,365,885 | 1 | 0 | null | 2019-09-27T17:04:14 | 2019-09-27T17:04:14 | null |
UTF-8
|
Python
| false | false | 11,199 |
py
|
"""Monte Carlo of inserting stuff inside of a sphere."""
from enum import Enum
from numba import jit
import numpy as np
import math
from .. import plot as wplot
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
SMALL_NUM = 10e-8
# @jit(nopython=True)
def is_inside(x, xr, r):
"""is the sphere of radius xr totally inside the confining sphere of radius
r centered at the origin?
||x|| + xr < r ?"""
return x[0]*x[0] + x[1]*x[1] + x[2]*x[2] < (r - xr)*(r - xr)
# @jit(nopython=True)
def norm3_squared(x):
return x[0]*x[0] + x[1]*x[1] + x[2]*x[2]
# @jit(nopython=True)
def weeks_chandler_anderson(r, sigma, epsilon=1):
"""WCA Interaction Potential (a rescaled "repulsive part of lennard jones"
potential) with epsilon defaulting to 1, so that you can fix beta*epsilon
elsewhere in the simulation."""
# r = x2 - x1
r2 = norm3_squared(r)
if r2 < SMALL_NUM:
return np.inf
sigma2 = sigma*sigma
if math.pow(2, 1/3)*sigma2 < r2:
return 0
sigr = sigma2/r2
sigr = sigr*sigr*sigr
return 4*epsilon*(sigr*(sigr - 1) + 1/4)
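# Added reference note (not in the original module): the expression above implements the WCA form
#   U(r) = 4*eps*[(sigma/r)**12 - (sigma/r)**6] + eps   for r < 2**(1/6)*sigma
#   U(r) = 0                                            otherwise,
# evaluated via sigr = (sigma**2/r**2)**3 = (sigma/r)**6, with the cutoff checked in squared
# form (2**(1/3)*sigma**2 < r**2 means r is beyond the cutoff, so U = 0).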
# # turns out this can be made into a clever weeks_chandler_anderson call
# @jit(nopython=True)
# def confined_in_sphere(x, r_x, r_conf, epsilon=1):
# """E_confinement = E_WCA(r_conf - ||x||)."""
# r = r_conf - np.linalg.norm(x)
def num_spheres_from_density(target_density, sphere_radii, confinement_radius):
# num_spheres*sphere_volume = target_density*confinement_volume
sphere_volume = volume_of_sphere(sphere_radii)
confinement_volume = volume_of_sphere(confinement_radius)
num_spheres = math.floor(target_density*confinement_volume/sphere_volume)
return num_spheres
def initial_locations(num_spheres, sphere_radii, confinement_radius):
"""Get uniformly random initial locations for Metropolis Algorithms to
start at."""
num_successful = 0
sphere_centers = np.zeros((num_spheres, 3))
while num_successful < num_spheres:
new_pos = 2*confinement_radius*(np.random.random_sample((3,)) - 1/2)
if is_inside(new_pos, sphere_radii, confinement_radius):
sphere_centers[num_successful, :] = new_pos
num_successful += 1
return sphere_centers
# @jit(nopython=True)
def volume_of_sphere(radius):
return (4/3)*math.pi*(radius**3)
def single_wca_energy(sphere_pos, sphere_idx, sphere_centers, num_spheres, sphere_radii,
confinement_radius=1, energy_outside=np.inf):
"""Return contribution to energy of a particle at position sphere_pos if it
replaced the particle with index sphere_idx in the list of particles
sphere_centers corresponding to spheres of radius sphere_radii inside of a
confining sphere of radius confinement_radius=1 centered at the origin.
Distance from confinement has an energetic contribution equal to being that
same distance from another sphere.
Use energy_outside=np.inf to set the energy associated with being totally
    outside the confinement. Set energy_outside to None to honor the WCA
potential symmetrically outside the sphere."""
energy = 0
dist_to_bdry = confinement_radius - np.linalg.norm(sphere_pos)
if dist_to_bdry < 0 and energy_outside is not None:
energy += energy_outside
else:
# vector with correct magnitude to pass to weeks_chandler_anderson
vec_to_bdry = np.array([dist_to_bdry, 0, 0])
energy += weeks_chandler_anderson(vec_to_bdry, 2*sphere_radii)
for sj in range(num_spheres):
if sj == sphere_idx:
continue
energy += weeks_chandler_anderson(sphere_pos - sphere_centers[sj,:], 2*sphere_radii)
return energy
# @jit(nopython=True)
def total_wca_energy(sphere_centers, num_spheres, sphere_radii,
confinement_radius=1, energy_outside=np.inf):
"""Return total energy of particles in sphere_centers corresponding to
spheres of radius sphere_radii inside of a confining sphere of radius
confinement_radius=1 centered at the origin. Distance from confinement has an energetic
contribution equal to being that same distance from another sphere.
Use energy_outside=np.inf to set the energy associated with being totally
    outside the confinement. Set energy_outside to None to honor the WCA
potential symmetrically outside the sphere."""
energy = 0
for si in range(num_spheres):
# confinement energy
dist_to_bdry = confinement_radius - np.linalg.norm(sphere_centers[si,:])
if dist_to_bdry < 0 and energy_outside is not None:
energy += energy_outside
else:
# vector with correct magnitude to pass to weeks_chandler_anderson
vec_to_bdry = np.array([dist_to_bdry, 0, 0])
energy += weeks_chandler_anderson(vec_to_bdry, 2*sphere_radii)
# interaction energy
for sj in range(si):
energy += weeks_chandler_anderson(sphere_centers[si,:] - sphere_centers[sj,:], 2*sphere_radii)
return energy
def norm_step_mc(num_steps, sphere_centers, num_spheres, sphere_radii, confinement_radius=1,
step_size=None, beta_epsilon=0.665, energy_outside=np.inf):
"""Peform num_steps monte carlo steps on the set of spheres with
sphere_centers, sphere_radii in a confinement centered at the origin of
radius confinement_radius. At each step, move one bead by a gaussian amount
with std dev step_size(default->sphere_radii). The beads are assumed to
have a weeks_chandler_anderson potential between them with sigma=sphere_radii, and
epsilon and the temperature are determined by beta_epsilon. The confinement
sphere is also weeks_chandler_anderson."""
# default step size to be on average the size of the bead
step_size = sphere_radii if step_size is None else step_size
tot_energy_change = 0
for i in range(num_steps):
si = np.random.randint(num_spheres)
# new positions energy calculation, and exit early if possible
new_pos = sphere_centers[si,:] + step_size*np.random.standard_normal((3,))
new_dist_to_bdry = confinement_radius - np.linalg.norm(new_pos)
        if new_dist_to_bdry < 0: # then you're outside the bdry
continue
new_potential = single_wca_energy(new_pos, si, sphere_centers, num_spheres, sphere_radii, confinement_radius, energy_outside)
old_potential = single_wca_energy(sphere_centers[si,:], si, sphere_centers, num_spheres, sphere_radii, confinement_radius, energy_outside)
pot_diff = new_potential - old_potential
# MH acceptance rule, most short-circuitable form
        if pot_diff > 0 and np.log(np.random.rand()) >= -beta_epsilon*pot_diff:
continue
# if we get here, the monte carlo step is accepted.
sphere_centers[si,:] = new_pos
tot_energy_change += pot_diff
return sphere_centers, tot_energy_change
def sphere_dispersal_mc(num_steps, target_density, sphere_radii, confinement_radius=1,
steps_per_check=1000, step_size=None,
beta_epsilon=0.665, initial_centers=None):
"""Perform MCMC for num_steps after uniform position initialization of
spheres with effective hard repulsive size sphere_radii (accomplished by
weeks_chandler_anderson potential with barker-henderson mean collision
diameter set to equal sigma) at target_density inside of a sphere with size
confinement_radius.
For now, beta_epsilon fixed to 0.665, per what Tom did in his thesis."""
num_spheres = num_spheres_from_density(target_density, sphere_radii, confinement_radius)
if initial_centers is None:
sphere_centers = initial_locations(num_spheres, sphere_radii, confinement_radius)
else:
sphere_centers = initial_centers
# break run into shorter sprints of 1000 steps, report energy change after
# each 1000 steps
num_checks = math.floor(num_steps/steps_per_check)
energy = total_wca_energy(sphere_centers, num_spheres, sphere_radii, confinement_radius)
for i in range(num_checks):
# run MC, reports energy change
sphere_centers, d_energy = norm_step_mc(num_steps=steps_per_check,
sphere_centers=sphere_centers, num_spheres=num_spheres,
sphere_radii=sphere_radii, confinement_radius=confinement_radius,
step_size=step_size, beta_epsilon=beta_epsilon)
print(energy + d_energy)
energy = total_wca_energy(sphere_centers, num_spheres, sphere_radii, confinement_radius)
print(energy)
return sphere_centers
def mc_minimize_energy(target_density, sphere_radii, confinement_radius=1,
steps_per_check=10000, step_size=None, beta_epsilon=0.665,
initial_centers=None):
"""Perform MCMC after uniform position initialization of
spheres with effective hard repulsive size sphere_radii (accomplished by
weeks_chandler_anderson potential with barker-henderson mean collision
diameter set to equal sigma) at target_density inside of a sphere with size
confinement_radius. Rerun in batches of steps_per_check MC steps until the
energies converge. To check if energy has converged, we do the lazy thing:
wait till it increases for the first time then just run one more MC batch
after that.
For now, beta_epsilon fixed to 0.665, per what Tom did in his thesis."""
num_spheres = num_spheres_from_density(target_density, sphere_radii, confinement_radius)
if initial_centers is None:
sphere_centers = initial_locations(num_spheres, sphere_radii, confinement_radius)
else:
sphere_centers = initial_centers
energy = total_wca_energy(sphere_centers, num_spheres, sphere_radii, confinement_radius)
i = 0
while True:
print("Total Energy after batch {i}: {energy: =10.8g}".format(i=i, energy=energy))
sphere_centers, d_energy = norm_step_mc(num_steps=steps_per_check,
sphere_centers=sphere_centers, num_spheres=num_spheres,
sphere_radii=sphere_radii, confinement_radius=confinement_radius,
step_size=step_size, beta_epsilon=beta_epsilon)
i += 1
energy += d_energy
if d_energy > 0:
print("Total Energy after batch {i}: {energy: =10.8g}".format(i=i, energy=energy))
sphere_centers, d_energy = norm_step_mc(num_steps=steps_per_check,
sphere_centers=sphere_centers, num_spheres=num_spheres,
sphere_radii=sphere_radii, confinement_radius=confinement_radius,
step_size=step_size, beta_epsilon=beta_epsilon)
i += 1
energy += d_energy
print("Total Energy after batch {i}: {energy: =10.8g}".format(i=i, energy=energy))
break
return sphere_centers
def plot_spheres(sphere_centers, radii, **kwargs):
fig = plt.figure()
ax = fig.gca(projection='3d')
palette = sns.color_palette('hls', 12)
for i,c in enumerate(sphere_centers):
color = palette[i % len(palette)]
wplot.draw_sphere(c, radii, colors=color, axes=ax, **kwargs)
return ax
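# Illustrative usage sketch (added for clarity; not part of the original module). The density and
# radius values are assumed example numbers, and running this directly requires the package
# context because of the relative `plot` import above.
if __name__ == '__main__':
    # pack spheres of radius 0.1 to 5% volume density inside the unit confinement
    centers = mc_minimize_energy(target_density=0.05, sphere_radii=0.1,
                                 confinement_radius=1, steps_per_check=10000)
    plot_spheres(centers, 0.1)
    plt.show()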
|
[
"[email protected]"
] | |
0b4ada6ee9bc3a49589ff4a8dc8621b475fb3800
|
822c566d3fe100f216284276d94341527a4af7a1
|
/class_based/spin/spin/asgi.py
|
bb331524cd68a94bd0362c3e57157d55b83b6cd6
|
[] |
no_license
|
ybgirgin3/spin
|
9f96016f17a6e77faa147bff47733b70da16014d
|
94afbcf62714d1f6c3b89c661390740fedb3c9ac
|
refs/heads/main
| 2023-06-10T17:48:45.538831 | 2021-06-23T17:15:16 | 2021-06-23T17:15:16 | 377,943,684 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 385 |
py
|
"""
ASGI config for spin project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'spin.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
0bdeece5a1628bcf49cf96dcab8289c3b2f0a78d
|
7d6000cc4d3a45c6fd3dd39c6a5cc05a72a78a93
|
/app/recipe/serializers.py
|
73f905c00b62d96586c15c3a36a669ac5e4eca01
|
[
"MIT"
] |
permissive
|
samims/recipe-app-api
|
9b5a5d25a584693e15639ef192339d551baf1e9f
|
7ff800e0548bae779ab0ee225e9aa2e2159e4043
|
refs/heads/master
| 2020-04-27T00:25:36.231859 | 2019-05-23T10:09:50 | 2019-05-23T10:09:50 | 173,933,369 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,485 |
py
|
from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag objects"""
class Meta:
model = Tag
fields = ("id", "name")
read_only_fields = ("id",)
class IngredientSerializer(serializers.ModelSerializer):
"""Serializer for ingredient objects"""
class Meta:
model = Ingredient
fields = ("id", "name")
read_only_fields = ("id",)
class RecipeSerializer(serializers.ModelSerializer):
"""Serializer a recipe"""
ingredients = serializers.PrimaryKeyRelatedField(
many=True, queryset=Ingredient.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = (
"id",
"title",
"ingredients",
"tags",
"time_minutes",
"price",
"link"
)
read_only_fields = ("id",)
class RecipeDetailSerializer(RecipeSerializer):
"""Serialize a Recipe Detail"""
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
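    # Example shape of RecipeDetailSerializer output (added note; the values are made up):
    #   {"id": 1, "title": "Tea", "ingredients": [{"id": 3, "name": "Mint"}],
    #    "tags": [{"id": 2, "name": "Vegan"}], "time_minutes": 5, "price": "2.50", "link": ""}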
class RecipeImageSerializer(serializers.ModelSerializer):
"""Serializer for uploading images to recipes"""
class Meta:
model = Recipe
fields = ('id', 'image')
read_only_fields = ('id',)
|
[
""
] | |
94091a71b984024c36aea8b4130b273ec817a02f
|
2504dafe6e53be0c2e4c6deffa7eb82b9ec139de
|
/one.py
|
79507c1e4827e04fa1911d085dae54db71cb4213
|
[
"MIT"
] |
permissive
|
dollarkillerx/PyTorchStudy
|
62d6b2d01daa2643ec3a9ddb5fe002432ab51175
|
c17b2973c89e3a2f088513f29bd5eb6f47957585
|
refs/heads/master
| 2020-11-28T23:06:37.281519 | 2019-12-24T14:06:49 | 2019-12-24T14:06:49 | 229,944,684 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 856 |
py
|
import torch
# define a vector
vector = torch.tensor([1,2,3,4])
print("Vector:\t\t",vector)
print('Vector: Shape:\t',vector.shape)
# define a matrix
matrix = torch.tensor([[1,2],[3,4]])
print('Matrix:\n',matrix)
print('Matrix Shape:\n',matrix.shape)
# define a tensor
tensor = torch.tensor([ [ [1,2],[3,4] ], [ [5,6],[7,8] ] ])
print('Tensor:\n',tensor)
print('Tensor Shape:\n',tensor.shape)
# Autograd handles all of the gradient computation and backpropagation
# Under autograd, the backward-pass (backprop) code is defined automatically
# .requires_grad
#   after setting .requires_grad=True on a tensor, autograd tracks every operation involving that tensor
# .backward()
#   once all operations are done, calling .backward() computes the gradients and runs the backward pass
# .grad
#   used to access the gradients
# with torch.no_grad()
#   gradients are ignored inside this block
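# Minimal autograd demo (added for clarity; not part of the original file):
x = torch.tensor([2.0, 3.0], requires_grad=True)  # track operations on x
y = (x * x).sum()                                 # y = x1^2 + x2^2
y.backward()                                      # autograd computes dy/dx
print('Grad:\t\t', x.grad)                        # tensor([4., 6.])
with torch.no_grad():                             # operations here are not tracked
    z = x * 2
print('Tracked:\t', z.requires_grad)              # False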
|
[
"[email protected]"
] | |
bea96b1088510cef4846179cadb65027bb97bb9d
|
b415828936d047ca1fca77bcb488ee1bb56bdb29
|
/backend/tweet/migrations/0003_auto_20210128_2115.py
|
3f4306089058adbec0eaa6352edfe228fc38f9ee
|
[] |
no_license
|
Smerlinski/twitterlike
|
d482d88c1a595da12f25818c3166be99ae378286
|
98c63182e3eb542bbb4123b411a15095b6f12a2d
|
refs/heads/main
| 2023-02-23T18:36:12.921841 | 2021-02-01T09:49:02 | 2021-02-01T09:49:02 | 333,707,122 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 733 |
py
|
# Generated by Django 2.2.17 on 2021-01-28 20:15
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tweet', '0002_commentlike_tweetlike'),
]
operations = [
migrations.AddField(
model_name='comment',
name='likes',
field=models.ManyToManyField(related_name='comment_likes', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='tweet',
name='likes',
field=models.ManyToManyField(related_name='tweet_likes', to=settings.AUTH_USER_MODEL),
),
]
|
[
"[email protected]"
] | |
8a0848652a216c54c6483dd93481724a0b600cde
|
1d70bed8b3e7314cac8a1b5cb8e20a98924d0746
|
/gdp and stock predicton/modules.py
|
6d2685b1da0a102bf5bcb75c678ec0dfd2a0d57a
|
[] |
no_license
|
bateikoEd/dipl_program
|
02d46f2342d2814ed58181f38f9a781effeedd05
|
0b885c436cda096c80fe2b445337dc7e0bf16ba0
|
refs/heads/main
| 2023-07-24T05:42:05.509338 | 2021-09-06T06:36:18 | 2021-09-06T06:36:18 | 344,238,800 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,647 |
py
|
import pandas as pd
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.model_selection import cross_val_score
import numpy as np
from sklearn.metrics import r2_score
# from sklearn.metrics import mean_absolute_percentage_error
from statsmodels.stats.stattools import durbin_watson
from sklearn.metrics import explained_variance_score
import matplotlib.pyplot as plt
import seaborn as sns
def barplot(data, title):
# fig = plt.figure(figsize=(18,6))
bar_plot = sns.barplot(x=data['feature'], y=data['value'])
for item in bar_plot.get_xticklabels():
item.set_rotation(90)
plt.title(title)
plt.show()
def get_score_for_model(models, X_train, y_train, scoring, n_splits=3,print_res=True):
def append_res_to_boxplot():
i = 0
df = pd.DataFrame()
while i < len(results[0]):
line = [[num[i], ml] for num, ml in zip(results, names)]
# for num, ml in zip(results, names):
# line.append([num[i],ml])
i = i + 1
df = df.append(pd.DataFrame(line, columns=[scoring, 'ML']), ignore_index=True)
return df
seed = 13
results = []
means = []
sdv = []
names = []
scoring = scoring
for name, model in models:
strat = KFold(n_splits=n_splits, random_state=seed, shuffle=True)
cv_results = cross_val_score(model, X_train, y_train, cv=strat, scoring=scoring, n_jobs=-1)
results.append(cv_results)
names.append(name)
means.append(cv_results.mean())
sdv.append(cv_results.std())
if print_res:
print(f"{names[-1]}: {means[-1]} ({sdv[-1]})")
box_plot = append_res_to_boxplot()
df_means = pd.DataFrame({'ML': names, 'means': means, 'std': sdv})
return box_plot, df_means
def define_metrics(model, X_train_, X_test_, y_train, y_test, name):
pred_train_ = np.array(model.predict(X_train_))
pred_test_ = np.array(model.predict(X_test_))
y_train_ = np.array(y_train)
y_test_ = np.array(y_test)
metric_train = pd.DataFrame()
metric_train['name'] = [name + '_train']
metric_train['r2'] = [r2_score(y_train, pred_train_)]
metric_train['sum_squared_resid'] = np.sum((y_train_ - pred_train_)**2)
metric_train['MAPE'] = [np.mean(np.abs((y_train - pred_train_) / y_train)) * 100]
metric_train['RMSE'] = [np.sqrt(np.mean((y_train - pred_train_)**2))]
metric_train['durbin_watson'] = [durbin_watson(y_train - pred_train_)]
metric_train['theil_index'] = [np.sqrt((1/len(pred_train_))*np.sum((y_train_-pred_train_)**2))
/ (np.sqrt((1/len(y_train_))*np.sum(y_train_**2)) + np.sqrt((1/len(pred_train_))*np.sum(pred_train_**2)))]
metric_train['ex_var'] = [explained_variance_score(y_train, pred_train_)]
metric_test = pd.DataFrame()
metric_test['name'] = [name + '_test']
metric_test['r2'] = [r2_score(y_test, pred_test_)]
metric_test['sum_squared_resid'] = np.sum((y_test_ - pred_test_)**2)
metric_test['MAPE'] = [np.mean(np.abs((y_test - pred_test_) / y_test)) * 100]
metric_test['RMSE'] = [np.sqrt(np.mean((y_test - pred_test_) ** 2))]
metric_test['durbin_watson'] = [durbin_watson(y_test - pred_test_)]
metric_test['theil_index'] = [np.sqrt((1/len(pred_test_))*np.sum((y_test_-pred_test_)**2))
/ (np.sqrt((1/len(y_test_))*np.sum(y_test_**2)) + np.sqrt((1/len(pred_test_))*np.sum(pred_test_**2)))]
metric_test['ex_var'] = [explained_variance_score(y_test, pred_test_)]
return metric_train.append(metric_test)
if __name__ == '__main__':
pass
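    # Illustrative usage sketch (added for clarity; not part of the original file). It uses
    # synthetic data and a single linear model, and assumes the pandas/scikit-learn versions the
    # module was written against (DataFrame.append predates pandas 2.0).
    from sklearn.linear_model import LinearRegression
    from sklearn.model_selection import train_test_split
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.rand(100, 3), columns=['a', 'b', 'c'])
    y = 2 * X['a'] + X['b'] - X['c'] + 0.1 * rng.rand(100)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    box_plot, df_means = get_score_for_model([('lr', LinearRegression())],
                                             X_train, y_train, scoring='r2')
    model = LinearRegression().fit(X_train, y_train)
    print(define_metrics(model, X_train, X_test, y_train, y_test, 'lr'))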
|
[
"[email protected]"
] | |
a780c71aa45f05efbbf6ac177b608b0cc54997b7
|
71d3e539e3ba8ab06d61bfb3683414a129a4d744
|
/detecting_objects/image_evaluator/src/image_evaluator.py
|
76b3bf77e4ab4ce278f0921170cf311b8441bb6f
|
[] |
no_license
|
browlm13/Basketball-Shot-Detection
|
b0dfbc0de3129917697b077a59f5519a7faecc57
|
8ea4e35effdf2c6f02d2d275cd3d48d9da218608
|
refs/heads/master
| 2021-10-27T21:10:11.241146 | 2019-04-19T18:50:57 | 2019-04-19T18:50:57 | 105,725,060 | 83 | 16 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,400 |
py
|
#python3
"""
Image Evaluator Class
img_eval = Image_Evaluator()
# Loading Models - Todo: store in file so only model name has to be used
BASKETBALL_MODEL = {'name' : 'basketball_model', 'paths' : {'frozen graph': PATH_TO_FROZEN_GRAPH, 'labels' : PATH_TO_LABELS}}
PERSON_MODEL = {'name' : 'person_model', 'paths' : {'frozen graph': PATH_TO_FROZEN_GRAPH, 'labels' : PATH_TO_LABELS}}
img_eval.load_models([BASKETBALL_MODEL, PERSON_MODEL])
todo: img_eval.annotate_directory(image_directory, annotations_directory) #Add selected categories and minscores
todo: cropping
"""
import numpy as np
import os
from PIL import Image
import PIL.Image as Image
import xml.etree.ElementTree as ET
from xml.dom import minidom
import tensorflow as tf
#from utils import label_map_util
from image_evaluator.src.utils import label_map_util
import glob
import shutil
#from shutil import copyfile
#from shutil import copy
class Image_Evaluator:
def __init__(self):
self.models = []
self.categories = {}
def load_models(self, model_list):
#Todo: ensure existance
self.models = model_list
#determine categories
for m in self.models:
#get each models label dict
m['categories'] = label_map_util.get_label_map_dict( m['paths']['labels'], use_display_name=m['use_display_name'] )
#go through models, for each unique category list all models that can identify, use first as evaluation model
for m in self.models:
for key in m['categories']:
if key in self.categories:
self.categories[key]['models'].append(m['name'])
else:
self.categories[key] = {'models' : [m['name']], 'evaluation_model' : m['name']}
    #set all evaluation models used (what needs to be loaded into memory for image evaluation)
def get_evaluation_models(self):
evaluation_models = []
for c in self.categories:
if self.categories[c]['evaluation_model'] not in evaluation_models:
evaluation_models.append(self.categories[c]['evaluation_model'])
return evaluation_models
def set_category_evaluation_model(self, category_name, model_name):
self.categories[category_name]['evaluation_model'] = model_name
#path, folder, filename
def get_path_data(self, path):
folder = os.path.basename(os.path.dirname(path))
filename = os.path.basename(path)
return path, folder, filename
def get_model_path(self, model_name, file_name):
path = ""
for model in self.models:
if model['name'] == model_name:
path = model['paths'][file_name]
return path
def get_model_categories_dict(self, model_name):
for model in self.models:
if model['name'] == model_name:
return model['categories']
def get_model_evaluation_categories(self, model_name):
evaluation_categories = []
for c in self.categories:
if self.categories[c]['evaluation_model'] == model_name:
evaluation_categories.append(c)
return evaluation_categories
def load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
def image_dimensions(self, image_np):
image_pil = Image.fromarray(np.uint8(image_np)).convert('RGB')
return image_pil.size
#
# Writing Image XML annotations
#
def swap_exentsion(self, full_filename, new_extension):
template = "%s.%s" # filename, extension
filename_base, old_extension = os.path.splitext(full_filename)
return template % (filename_base, new_extension.strip('.'))
def generate_new_filename(self, output_directory_path, image_info, new_extension):
new_filename = self.swap_exentsion(image_info['image_filename'], new_extension)
full_path = os.path.join(output_directory_path, new_filename)
return full_path
def generate_xml_string(self, image_info):
image_data = {}
image_data['path'] = image_info['image_path']
image_data['folder'] = image_info['image_folder']
image_data['filename'] = image_info['image_filename']
image_data['width'] = image_info['image_width']
image_data['height'] = image_info['image_height']
image_data['depth'] = 3
        #unspecified
image_data['database'] = 'NA'
image_data['segmented'] = 0
image_data['objects'] = []
for item in image_info['image_items_list']:
o = {}
o['name'] = item['class']
xmin, xmax, ymin, ymax = item['box']
o['xmin'] = xmin
o['ymin'] = ymin
o['xmax'] = xmax
o['ymax'] = ymax
            #unspecified
o['pose'] = 'Unspecified'
o['truncated'] = 0
o['difficult'] = 0
image_data['objects'].append(o)
# create XML
annotation_tag = ET.Element('annotation')
folder_tag = ET.SubElement(annotation_tag, 'folder')
folder_tag.text = image_data['folder']
filename_tag = ET.SubElement(annotation_tag, 'filename')
filename_tag.text = image_data['filename']
path_tag = ET.SubElement(annotation_tag, 'path')
path_tag.text = image_data['path']
source_tag = ET.SubElement(annotation_tag, 'source')
database_tag = ET.SubElement(source_tag, 'database')
database_tag.text = image_data['database']
size_tag = ET.SubElement(annotation_tag, 'size')
width_tag = ET.SubElement(size_tag, 'width')
width_tag.text = str(image_data['width'])
height_tag = ET.SubElement(size_tag, 'height')
height_tag.text = str(image_data['height'])
depth_tag = ET.SubElement(size_tag, 'depth')
depth_tag.text = str(image_data['depth'])
segmented_tag = ET.SubElement(annotation_tag, 'segmented')
segmented_tag.text = str(0)
for o in image_data['objects']:
object_tag = ET.SubElement(annotation_tag, 'object')
name_tag = ET.SubElement(object_tag, 'name')
name_tag.text = o['name']
pose_tag = ET.SubElement(object_tag, 'pose')
pose_tag.text = o['pose']
truncated_tag = ET.SubElement(object_tag, 'truncated')
truncated_tag.text = str(o['truncated'])
difficult_tag = ET.SubElement(object_tag, 'difficult')
difficult_tag.text = str(o['difficult'])
bndbox_tag = ET.SubElement(object_tag, 'bndbox')
xmin_tag = ET.SubElement(bndbox_tag, 'xmin')
xmin_tag.text = str(o['xmin'])
ymin_tag = ET.SubElement(bndbox_tag, 'ymin')
ymin_tag.text = str(o['ymin'])
xmax_tag = ET.SubElement(bndbox_tag, 'xmax')
xmax_tag.text = str(o['xmax'])
ymax_tag = ET.SubElement(bndbox_tag, 'ymax')
ymax_tag.text = str(o['ymax'])
#return ET.tostring(annotation_tag).decode('utf-8')
dom = minidom.parseString(ET.tostring(annotation_tag).decode('utf-8'))
return dom.toprettyxml(indent='\t')
def write_xml_file(self, image_info, outpath):
# if directorydoes not exist, create it
if not os.path.exists(outpath):
os.makedirs(outpath)
xml_string = self.generate_xml_string(image_info)
xml_filename = self.generate_new_filename(outpath, image_info, 'xml')
with open(xml_filename, "w") as f:
f.write(xml_string)
def filter_minimum_score_threshold(self, image_info_bundel, min_score_thresh):
filtered_image_info_bundel = {}
for image_path, image_info in image_info_bundel.items():
filtered_image_info_bundel[image_path] = image_info
filtered_image_items_list = []
for item in image_info['image_items_list']:
if item['score'] > min_score_thresh:
filtered_image_items_list.append(item)
filtered_image_info_bundel[image_path]['image_items_list'] = filtered_image_items_list
return filtered_image_info_bundel
def filter_selected_categories(self, image_info_bundel, selected_categories_list):
filtered_image_info_bundel = {}
for image_path, image_info in image_info_bundel.items():
filtered_image_info_bundel[image_path] = image_info
filtered_image_items_list = []
for item in image_info['image_items_list']:
if item['class'] in selected_categories_list:
filtered_image_items_list.append(item)
filtered_image_info_bundel[image_path]['image_items_list'] = filtered_image_items_list
return filtered_image_info_bundel
def _image_info(self, category_index, selected_categories, image_np, boxes, scores, classes, min_score_thresh=0.0001):
# retrieve image size
image_pil = Image.fromarray(np.uint8(image_np)).convert('RGB')
im_width, im_height = image_pil.size
#box, class, score
item_list = []
for i in range(boxes.shape[0]):
if scores is None or scores[i] > min_score_thresh:
item = {}
#
# box
#
normalized_box = tuple(boxes[i].tolist())
n_ymin, n_xmin, n_ymax, n_xmax = normalized_box
box = (int(n_xmin * im_width), int(n_xmax * im_width), int(n_ymin * im_height), int(n_ymax * im_height)) #(left, right, top, bottom)
item['box'] = box
#
# class name
#
class_name = 'NA'
if classes[i] in category_index.keys():
class_name = str(category_index[classes[i]]['name'])
item['class'] = class_name
#
# detection score
#
item['score'] = 100*scores[i]
# add if class is in selected_classes, to ensure only evaluation model is evalutating
if item['class'] in selected_categories:
item_list.append(item)
return item_list
def get_image_info(self, image_path_list, min_score_thresh=None, prevent_overlap=True):
image_info_bundel = dict((image_path, {'image_items_list':[], 'image_folder':'', 'image_filename':'','image_path':'', 'image_height':-1, 'image_width':-1}) for image_path in image_path_list) #key= path, value is cobined item list
# for each unique model evaluator in categories list perform detection
for model_name in self.get_evaluation_models():
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.get_model_path(model_name, 'frozen graph'), 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
path_to_labels = self.get_model_path(model_name, 'labels')
label_map = label_map_util.load_labelmap(path_to_labels)
categories_dict = self.get_model_categories_dict(model_name)
num_classes = len(categories_dict)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
#
# Detection
#
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Definite input and output Tensors for detection_graph
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each box represents a part of the image where a particular object was detected.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') # Each score represent how level of confidence for each of the objects.
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
#
# Image Detection Loop
#
for image_path in image_path_list:
#
# prepare image for model input
#
#tmp non relitive path test
script_dir = os.path.dirname(os.path.abspath(__file__))
image = Image.open(os.path.join(script_dir, image_path))
#image = Image.open(image_path)
image_np = self.load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0) # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
#
# Detection
#
(boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
"""
# new code
if prevent_overlap:
iou_threshold = 0.5 #overlap threshold
max_output_size = 2 #max num boxes overlap threshold
selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold)
boxes = tf.gather(boxes, selected_indices) #returns selected boxes
scores = tf.gather(np.squeeze(scores), selected_indices) #returns selected
classes = tf.gather(np.squeeze(classes), selected_indices) #returns selected
"""
#
# Reformat results
#
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
#
# Get selected items (box, class, score)
#
#selected classes are all categories current model is set to evaluate
selected_categories = self.get_model_evaluation_categories(model_name)
image_items_list = []
if min_score_thresh is not None:
mst_decimal = min_score_thresh * 0.01 #convert to decimal
image_items_list = self._image_info(category_index, selected_categories, image_np, boxes, scores, classes, mst_decimal)
else:
image_items_list = self._image_info(category_index, selected_categories, image_np, boxes, scores, classes)
# add to / combine image items list
image_info_bundel[image_path]['image_items_list'] += image_items_list
#
# meta data - PLEASE STORE FOR USE IN XML ANNOTATIONS
#
image_path, image_folder, image_filename = self.get_path_data(image_path)
image_height, image_width = self.image_dimensions(image_np)
image_info_bundel[image_path]['image_path'] = image_path
image_info_bundel[image_path]['image_folder'] = image_folder
image_info_bundel[image_path]['image_filename'] = image_filename
image_info_bundel[image_path]['image_height'] = image_height
image_info_bundel[image_path]['image_width'] = image_width
return image_info_bundel
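# Editor note (summary of the structure built above): each image_info_bundel[image_path]
# entry carries 'image_items_list' (per-detection dicts exposing at least 'class' and 'score'),
# plus 'image_path', 'image_folder', 'image_filename', 'image_height' and 'image_width'.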
def remove_string_start_end_whitespace(self, string):
if string[0] == ' ':
string = string[1:]
if string[-1] == ' ':
string = string[:-1]
return string
def category_2_symbol(self, category_name):
return category_name.strip()
def _any(self, category_name, min_score, image_items_list):
""" return True if one or more of the category name was detected above minimum score """
for item in image_items_list:
if (item['class'] == category_name) and (item['score'] > min_score): return True
return False
def _num(self, category_name, min_score, image_items_list):
""" return number of the category name detected above minimum score """
num_detected = 0
for item in image_items_list:
if (item['class'] == category_name) and (item['score'] > min_score): num_detected += 1
return num_detected
def boolean_image_evaluation(self, image_path_list, boolean_categories_present):
""" accepts list of paths to images and common boolean expression of categories present ex: any('person',30.0) or (num('basketball', 60.0) > 2)"""
image_info_bundel = self.get_image_info(image_path_list)
image_boolean_bundel = dict((image_path, False) for image_path in image_path_list) # key = image path, value is set to False initially
for image_path, image_info in image_info_bundel.items():
any = lambda category_name, min_score : self._any(category_name, min_score, image_info['image_items_list'])
num = lambda category_name, min_score : self._num(category_name, min_score, image_info['image_items_list'])
scope = locals()
image_boolean_bundel[image_path] = eval(boolean_categories_present, scope)
return image_boolean_bundel, image_info_bundel
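# Minimal usage sketch (hypothetical image path; assumes ie is an Image_Evaluator with models loaded):
# bools, info = ie.boolean_image_evaluation(['photo1.jpg'], "any('person', 30.0) or (num('basketball', 60.0) > 2)")
# bools['photo1.jpg'] is True when the rule holds; info['photo1.jpg']['image_items_list'] lists the detections.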
def move_images_bool_rule(self, input_image_directory_path, image_output_directory_path, bool_rule, annotations_output_directory_path = False, annotations_min_score_thresh=None, annotations_selected_category_list=None):
""" given input directory of images (currently JPEG), move selected images that satisfy bool rule to new directory, create annotation directory (xml) if specifeid. """
# get all image paths in directory
accpeted_extensions = ['jpg', 'JPEG', 'jpeg']
image_path_list = []
for extension in accpeted_extensions:
glob_phrase = os.path.join(input_image_directory_path, '*.' + extension)
for image_path in glob.glob(glob_phrase):
# check image can be reshaped (tmp)
try:
script_dir = os.path.dirname(os.path.abspath(__file__))
image = Image.open(os.path.join(script_dir, image_path))
image_np = self.load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0) # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
#add
image_path_list += [image_path]
#tmp
print(image_path)
except:
print("error loading: %s" % image_path)
# evaluate
image_boolean_bundel, image_info_bundel = self.boolean_image_evaluation(image_path_list, bool_rule)
# if image output directory does not exist, create it
if not os.path.exists(image_output_directory_path): os.makedirs(image_output_directory_path)
# copy images over with same basename
for image_path, copy_bool in image_boolean_bundel.items():
if copy_bool: shutil.copy(image_path, image_output_directory_path)
#annotations
# if image output directory does not exist, create it
if annotations_output_directory_path is not False:
if not os.path.exists(annotations_output_directory_path): os.makedirs(annotations_output_directory_path)
#filter selected categories and min score threshold for image_info_bundel
if annotations_selected_category_list is not None:
image_info_bundel = self.filter_selected_categories(image_info_bundel, annotations_selected_category_list)
if annotations_min_score_thresh is not None:
image_info_bundel = self.filter_minimum_score_threshold(image_info_bundel, annotations_min_score_thresh)
#change image location data and write xml file
for image_path, image_info in image_info_bundel.items():
# if bool statement is true
if image_boolean_bundel[image_path]:
#change image location info
new_image_info = image_info
new_image_filename = os.path.basename(image_path) #same technically
new_image_folder = os.path.basename(image_output_directory_path)
new_image_path = os.path.join(image_output_directory_path, new_image_filename)
new_image_info['image_path'] = new_image_path
new_image_info['image_folder'] = new_image_folder
new_image_info['image_filename'] = new_image_filename
#write
self.write_xml_file(new_image_info, annotations_output_directory_path)
def run():
pass
"""
BASKETBALL_MODEL = {'name' : 'basketball_model_v1', 'use_display_name' : False, 'paths' : {'frozen graph': "models/basketball_model_v1/frozen_inference_graph/frozen_inference_graph.pb", 'labels' : "models/basketball_model_v1/label_map.pbtxt"}}
PERSON_MODEL = {'name' : 'ssd_mobilenet_v1_coco_2017_11_17', 'use_display_name' : True, 'paths' : {'frozen graph': "models/ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph/frozen_inference_graph.pb", 'labels' : "models/ssd_mobilenet_v1_coco_2017_11_17/mscoco_label_map.pbtxt"}}
ie = Image_Evaluator()
ie.load_models([BASKETBALL_MODEL, PERSON_MODEL])
image_input_base_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/google-images-download/downloads/"
# for each directory in downloads
for image_dir_path in glob.glob(image_input_base_directory_path + "/*/"):
dirname = os.path.basename(image_dir_path[:-1])
print(dirname)
image_input_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/google-images-download/downloads/" + dirname
image_output_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/gather/%s_images" % dirname
annotation_output_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/gather/%s_annotations" % dirname
bool_rule = "(any('basketball', 85.0) and not any('person', 15.0)) or ((num('person', 95.0) == 1) and not any('basketball', 15.0) and (num('person', 15.0) == 1)) or (any('basketball', 85.0) and (num('person', 95.0) ==1) and (num('person', 15.0) == 1))"
#print(image_input_directory_path)
#print(image_output_directory_path)
#print(annotation_output_directory_path)
ie.move_images_bool_rule(image_input_directory_path, image_output_directory_path, bool_rule, annotation_output_directory_path, 85.0, ['basketball', 'person'])
"""
|
[
"[email protected]"
] | |
8600dbd21a69fe6b9edddacacffc291ea8b22f46
|
9cde8c1e9e176f5092769d4628cc54179a0f5a56
|
/attention_layer.py
|
d554c3b57b6c76474390322fdb4c6aa5c88787b5
|
[] |
no_license
|
Nangal/ieee-dsmp-2018-paper
|
ba260c1f0a8a32fe6ed4d588ae568c9485b31fcb
|
7e80d64a42cd53bfc930b48c331778c759360f36
|
refs/heads/master
| 2022-03-31T01:25:58.472858 | 2020-01-26T16:44:46 | 2020-01-26T16:44:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,347 |
py
|
from keras import backend as K
from keras import initializers, regularizers, constraints
from keras.engine.topology import Layer
def dot_product(x, kernel):
"""
Wrapper for dot product operation, in order to be compatible with both
Theano and Tensorflow
Args:
x (): input
kernel (): weights
Returns:
"""
if K.backend() == 'tensorflow':
return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
else:
return K.dot(x, kernel)
class AttentionWithContext(Layer):
"""
Attention operation, with a context/query vector, for temporal data.
Supports Masking.
Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
"Hierarchical Attention Networks for Document Classification"
by using a context vector to assist the attention
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
How to use:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Note: The layer has been tested with Keras 2.0.6
Example:
model.add(LSTM(64, return_sequences=True))
model.add(AttentionWithContext())
# next add a Dense layer (for classification/regression) or whatever...
"""
def __init__(self,
W_regularizer=None, u_regularizer=None, b_regularizer=None,
W_constraint=None, u_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.u_regularizer = regularizers.get(u_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.u_constraint = constraints.get(u_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(AttentionWithContext, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight((input_shape[-1], input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((input_shape[-1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
self.u = self.add_weight((input_shape[-1],),
initializer=self.init,
name='{}_u'.format(self.name),
regularizer=self.u_regularizer,
constraint=self.u_constraint)
super(AttentionWithContext, self).build(input_shape)
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
uit = dot_product(x, self.W)
if self.bias:
uit += self.b
uit = K.tanh(uit)
ait = dot_product(uit, self.u)
a = K.exp(ait)
# apply mask after the exp. will be re-normalized next
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
a *= K.cast(mask, K.floatx())
# in some cases especially in the early stages of training the sum may be almost zero
# and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
|
[
"[email protected]"
] | |
64c237350048f255b969f59f6775e14437e598ff
|
418b79623b277a76fa0450cb9156a2ac2fe0ee46
|
/azinvoice-xtract/azinvoice_xtract.py
|
2163c7fa4efce2c88397a69c17a954b875d1fb6c
|
[
"MIT"
] |
permissive
|
digidrills/inference-models
|
31cccf1f56c53fc340c957d44499f352bc7fb43c
|
9b8d751374f06b02f1a2dada3288bc3c86df9b1c
|
refs/heads/main
| 2023-09-06T04:49:15.674897 | 2021-11-11T08:28:38 | 2021-11-11T08:29:01 | 405,635,988 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,535 |
py
|
import json
def xtract_one_azinvoice(srcname:str, azformJson:dict, inclBox:bool, inclFields:bool) -> dict:
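# Flattens one invoice analysis result (apparently Azure Form Recognizer output) into
# {'srcname', 'status', 'pages', 'boxes', 'fields'}: 'boxes' keeps OCR lines with bounding
# boxes, 'fields' keeps typed key/value results, and table rows land in each field's 'tbllist'.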
xtract_flds = dict()
xtract_flds.update({'srcname': srcname, 'status': azformJson.get('status'), 'pages': list(), 'boxes': list(), 'fields': list()})
if inclBox and 'readResults' in azformJson.get('analyzeResult'):
for pg in azformJson.get('analyzeResult').get('readResults'):
pgno = pg.get('page')
for ln in pg.get('lines'):
newbox = {'pgno': pgno, 'text': ln.get('text'), 'coords': ln.get('boundingBox')}
xtract_flds.get('boxes').append(newbox)
if inclFields and 'documentResults' in azformJson.get('analyzeResult'):
for colln in azformJson.get('analyzeResult').get('documentResults'):
for fld in colln.get('fields'):
f = colln.get('fields').get(fld)
if 'type' in f and 'text' in f and 'confidence' in f and f.get('type') not in ['array']:
value = ''
value = f.get('valueString') if 'valueString' in f else value
value = f.get('valueNumber') if 'valueNumber' in f else value
value = f.get('valueDate') if 'valueDate' in f else value
l = {'srcname': srcname, 'fld': fld, 'type': f.get('type'), 'text': f.get('text'), 'value': value, 'score': f.get('confidence'), 'tbllist': list()}
xtract_flds.get('fields').append(l)
else:
l = {'fld': fld, 'type': f.get('type'), 'text': '', 'value': '', 'score': 0, 'tbllist': list()}
for rno, r in enumerate(f.get('valueArray')):
if 'type' in r and r.get('type') in ['object'] and 'valueObject' in r:
l.get('tbllist').append({'rno': rno+1, 'r': list()})
itmobj = r.get('valueObject')
for k in itmobj:
i = itmobj.get(k)
value = ''
value = i.get('valueString') if 'valueString' in i else value
value = i.get('valueNumber') if 'valueNumber' in i else value
value = i.get('valueDate') if 'valueDate' in i else value
itm = {'srcname': srcname, 'fld': k, 'type': i.get('type'), 'text': i.get('text'), 'value': value, 'score': i.get('confidence')}
l.get('tbllist')[-1]['r'].append(itm)
xtract_flds.get('fields').append(l)
return xtract_flds
def collate_xtractd_azinvoices(xtractdAzInvoices:dict) -> dict:
azi = xtractdAzInvoices
cl_fields = dict()
for invc in azi:
srcname = azi.get(invc).get('srcname')
for xtract_row in azi.get(invc).get('fields'):
if xtract_row.get('value') not in ['']:
# regular fields.. not a table
cl_fields.update({xtract_row.get('fld'): {'clflds': [], 'textseen': list()}}) if xtract_row.get('fld') not in cl_fields else None
if xtract_row.get('text') not in cl_fields.get(xtract_row.get('fld')).get('textseen'):
# keep only unique records
xtract_row.pop('tbllist', None)
cl_fields.get(xtract_row.get('fld')).get('clflds').append(xtract_row)
cl_fields.get(xtract_row.get('fld')).get('textseen').append(xtract_row.get('text'))
else:
# tabular fields.. is a table
tbl_prefix = xtract_row.get('fld')
for tbl_row in xtract_row.get('tbllist'):
r = tbl_row.get('r')
for col in r:
cl_fields.update({col.get('fld'): {'clflds': [], 'textseen': list()}}) if col.get('fld') not in cl_fields else None
if col.get('text') not in cl_fields.get(col.get('fld')).get('textseen'):
# keep only unique records
col.pop('tbllist', None)
cl_fields.get(col.get('fld')).get('clflds').append(col)
cl_fields.get(col.get('fld')).get('textseen').append(col.get('text'))
# now process the titles also
cl_fields.update({'_InvoiceFields_': {'clflds': [], 'textseen': list()}})
for fld in cl_fields:
row = {'srcname': srcname, 'fld': fld, 'type': 'string', 'text': fld, 'value': fld, 'score': 100}
cl_fields.get('_InvoiceFields_').get('clflds').append(row) if fld not in ['_InvoiceFields_'] else None
# and also the file-names
return cl_fields
#files = ["InvoiceResult-C0139 08-30-2021 DIR108647.pdf.json", "InvoiceResult-D0024 08-27-2021 CIN0009795.pdf.json", "InvoiceResult-D0024 08-31-2021 CIN0010044.pdf.json"]
files = ["InvoiceResult-A0095 01-12-2021 382576-4328.pdf.json"]
xtract_result = dict()
for f in files:
with open(f, encoding="utf8") as fptr:
azform_json = json.load(fptr)
xtract_flds = xtract_one_azinvoice(srcname=f, azformJson=azform_json, inclBox=False, inclFields=True)
fptr.close()
xtract_result.update({f: xtract_flds})
# print(xtract_result)
colltd_fields = collate_xtractd_azinvoices(xtractdAzInvoices=xtract_result)
for fld in colltd_fields:
print("======================= {}".format(fld))
for val in colltd_fields.get(fld).get('clflds'):
print(val)
|
[
"[email protected]"
] | |
8d3a150e92b97edc73a1af8bcfa9566c2296219c
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/.install/.backup/lib/surface/pubsub/subscriptions/seek.py
|
718094747211caab81d5b553f97be853d2cb982b
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 |
Apache-2.0
| 2020-12-24T00:38:09 | 2017-01-18T00:05:44 |
Python
|
UTF-8
|
Python
| false | false | 3,886 |
py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub subscriptions seek command."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.command_lib.pubsub import util
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SeekAlpha(base.Command):
"""This feature is part of an invite-only release of the Cloud Pub/Sub API.
Resets a subscription's backlog to a point in time or to a given snapshot.
This feature is part of an invitation-only release of the underlying
Cloud Pub/Sub API. The command will generate errors unless you have access to
this API. This restriction should be relaxed in the near future. Please
contact [email protected] with any questions in the meantime.
"""
@staticmethod
def Args(parser):
"""Registers flags for this command."""
parser.add_argument('subscription',
help='Name of the subscription to affect.')
seek_to_parser = parser.add_mutually_exclusive_group(required=True)
seek_to_parser.add_argument(
'--time', type=arg_parsers.Datetime.Parse,
help=('The time to seek to. Messages in the subscription that '
'were published before this time are marked as acknowledged, and '
'messages retained in the subscription that were published after '
'this time are marked as unacknowledged. See `gcloud topic '
'datetimes` for information on time formats.'))
seek_to_parser.add_argument(
'--snapshot',
help=('The name of the snapshot. The snapshot\'s topic must be the same'
' as that of the subscription.'))
parser.add_argument(
'--snapshot-project', default='',
help=('The name of the project the snapshot belongs to (if seeking to '
'a snapshot). If not set, it defaults to the currently selected '
'cloud project.'))
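# Example invocations (hypothetical values), based on the flags registered above:
#   gcloud alpha pubsub subscriptions seek SUBSCRIPTION --time=2016-01-01T00:00:00Z
#   gcloud alpha pubsub subscriptions seek SUBSCRIPTION --snapshot=SNAP --snapshot-project=PROJECT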
def Collection(self):
return util.SUBSCRIPTIONS_SEEK_COLLECTION
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
A serialized object (dict) describing the results of the operation. This
description fits the Resource described in the ResourceRegistry under
'pubsub.subscriptions.seek'.
"""
msgs = self.context['pubsub_msgs']
pubsub = self.context['pubsub']
subscription_path = util.SubscriptionFormat(args.subscription)
result = {'subscriptionId': subscription_path}
seek_req = msgs.SeekRequest()
if args.snapshot:
if args.snapshot_project:
snapshot_project = (
projects_util.ParseProject(args.snapshot_project).Name())
else:
snapshot_project = ''
seek_req.snapshot = util.SnapshotFormat(args.snapshot, snapshot_project)
result['snapshotId'] = seek_req.snapshot
else:
seek_req.time = args.time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
result['time'] = seek_req.time
pubsub.projects_subscriptions.Seek(
msgs.PubsubProjectsSubscriptionsSeekRequest(
seekRequest=seek_req, subscription=subscription_path))
return result
|
[
"[email protected]"
] | |
fb623d563352d34f93d25db2280e97e0f3f9054a
|
1fedce4c4d703711422f7303b56dce184c7db40b
|
/pset6/greedy/greedy.py
|
336aa056a77e015b5ef9fcad417ff59e5fc28fc8
|
[
"MIT"
] |
permissive
|
2series/Introduction-To-Computer-Science
|
76e5325cf1f6b6b55ec3e3d60335e271b9d9b866
|
74d3000ae95cd8b4773e35a55bcbaa5cd15bcf05
|
refs/heads/master
| 2021-06-17T17:18:53.827098 | 2019-05-10T10:26:46 | 2019-05-10T10:26:46 | 185,957,686 | 0 | 0 |
MIT
| 2021-03-20T01:17:51 | 2019-05-10T09:14:34 |
C
|
UTF-8
|
Python
| false | false | 966 |
py
|
def get_float(s):
try:
f = float(input(s))
except:
f = get_float("Try again: ")
finally:
return f
def mult(amt, denom):
temp = amt - (amt % denom)
return temp // denom
change = round(get_float("O hai! How much change is owed?\n"), 2)
while change < 0:
change = round(get_float("Try again: "), 2)
coins = 0
cents = int(change * 100)
denoms = {"quarter":25, "dime":10, "nickel":5, "penny":1}
while not cents == 0:
if cents >= denoms["quarter"]:
coins += mult(cents, denoms["quarter"])
cents = cents % denoms["quarter"]
elif cents >= denoms["dime"]:
coins += mult(cents, denoms["dime"])
cents = cents % denoms["dime"]
elif cents >= denoms["nickel"]:
coins += mult(cents, denoms["nickel"])
cents = cents % denoms["nickel"]
elif cents >= denoms["penny"]:
coins += mult(cents, denoms["penny"])
cents = cents % denoms["penny"]
print(coins)
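# Worked example: for change of 0.41 the greedy loop takes one quarter (16 cents left),
# one dime (6 left), one nickel (1 left) and one penny, so it prints 4.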
|
[
"[email protected]"
] | |
93ce085eb9a71c2da29d7c2bf4213cd57b201d5a
|
12a787bae1a844240c039ce5622435dbe6efd7c9
|
/cs_merge_msgs/src/cs_merge_msgs/srv/__init__.py
|
4fbf9118d6c4e63e1a58ae749607bc08532f2765
|
[
"MIT"
] |
permissive
|
Roboterbastler/cs_merge
|
b1d7ac2754fc2ef850ec3a8dba5e0934a1f0f139
|
3fbfd135fe2716038407d10b8c97aaa7977336ad
|
refs/heads/master
| 2021-01-12T10:50:10.060393 | 2015-09-15T22:35:20 | 2015-09-15T22:35:20 | 72,725,750 | 0 | 0 | null | 2016-11-03T08:49:11 | 2016-11-03T08:49:11 | null |
UTF-8
|
Python
| false | false | 54 |
py
|
from ._getWorld import *
from ._getTransform import *
|
[
"[email protected]"
] | |
226baf22346933643c546aa6960a462584a4b071
|
be793793d47b9687624da10e579dc923f4513aa0
|
/bin/gunicorn
|
5e11605cfd7af202235d465266eea369bca5833a
|
[] |
no_license
|
keyurparalkar/tikonaBot
|
d5d8ecae64e26afefee7e13e8cbbedd4aa2f01d6
|
b36c7063d708563e749216b8e8dd86459998d7e2
|
refs/heads/master
| 2022-12-12T13:26:46.502055 | 2017-10-19T10:41:13 | 2017-10-19T10:41:13 | 97,124,387 | 0 | 1 | null | 2022-07-06T19:16:54 | 2017-07-13T13:15:51 |
Python
|
UTF-8
|
Python
| false | false | 261 |
#!/home/keyur/Documents/tikonaProject/tikonaBot/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"[email protected]"
] | ||
59b8e49af01222c497d6f39607db2bb3f9786223
|
a99b618dd60ba9c978183c9f46b3e300be2d42b8
|
/flaskblog/run.py
|
5230fa0caba0971a1acc64f5bb4e42954df322a5
|
[] |
no_license
|
mysterier/flask-blog
|
bd422207cc15c193d8ab4320e4d8f8276de4da54
|
284e4086cf57a16635ff393c0f21699ceec76ba5
|
refs/heads/master
| 2021-01-18T16:17:34.307693 | 2014-05-01T12:00:43 | 2014-05-01T12:00:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
from app import app
from flaskext.markdown import Markdown
if __name__ == '__main__':
Markdown(app)
app.debug = True
app.run()
|
[
"[email protected]"
] | |
c95422cc0fd19646f3dba767a1d44ec273e65de7
|
4bf1f4e11cdf660ea4028af6d2bb02320903d393
|
/Constants.py
|
732630559e92c3bc4016ca02020ea560b8fb0c68
|
[] |
no_license
|
ilteralp/remote_sensing_with_gcn
|
e35f3019f0f8394d6e81811e9b07df62751975a1
|
a2cb9ed4a31da0e87c11f56b58bfe14dcbf35755
|
refs/heads/master
| 2023-04-19T06:09:38.326557 | 2021-05-07T22:02:17 | 2021-05-07T22:02:17 | 262,830,070 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,321 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 22 02:19:00 2020
@author: melike
"""
import os
NODE_FILE_NAME = '68k_pixel_as_node.txt'
ALPHA_TRAIN_PERCENT = 0.8
NUM_VAIHINGEN_CLASSES = 6
LR = 0.01
SEED = 4242
NUM_NODES = 48
""" ================================ Melike ================================ """
# BASE_PATH = "C:\\Users\\melike\\RS"
""" ================================== ROG ================================= """
BASE_PATH = "/home/rog/rs"
""" ======================================================================== """
ROOT_PATH = os.path.join(BASE_PATH, 'gcn', 'paths19', 'test_149')
VAI_PATH = os.path.join(BASE_PATH, 'vaihingen')
ALPHA_NOT_ADJ_NODE_FEATS_PATH = os.path.join(VAI_PATH, 'alpha_feats_no_adj.txt')
ALPHA_CLIQUE_EDGES_PATH = os.path.join(VAI_PATH, 'alpha_cliques.txt')
ALPHA_ROOT_PATH = os.path.join(VAI_PATH, '_pyg')
ALPHA_ADJ_NODE_FEATS_PATH = os.path.join(VAI_PATH, 'alpha_feats_adj.txt')
ALPHA_SPATIAL_ADJ_EDGES_PATH = os.path.join(VAI_PATH, 'alpha_spatial_adj.txt')
ALPHA_WEKA_FEATS_PATH = os.path.join(VAI_PATH, 'python_alpha_feats')
RESULTS_PATH = os.path.join(VAI_PATH, 'results')
SLIC_FOLDER_PATH = os.path.join(VAI_PATH, 'SLIC')
IMG_PATH = os.path.join(VAI_PATH, 'image', 'png', 'top_mosaic_09cm_area1.png')
LENNA_IMG_PATH = os.path.join(SLIC_FOLDER_PATH, 'lenna.png')
|
[
"[email protected]"
] | |
5a57d709e68b57343a2f490cf6f527c2a7bb2503
|
e18c84358b2a80159b37dcea39debfbbdaa66395
|
/backend/api/views/image_c.py
|
0dcc38ac99342faa71280bd72d3802a93a490817
|
[
"MIT"
] |
permissive
|
chunyenHuang/Disfactory
|
49d404609b73783ac488be9430d9cf518fc19f64
|
52985f7aadc8ca56344f80000b5e943bea99f83d
|
refs/heads/master
| 2021-01-03T01:54:40.415165 | 2020-01-22T04:09:29 | 2020-01-22T04:09:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 885 |
py
|
from django.conf import settings
from django.http import HttpResponse, JsonResponse
import django_q.tasks
from rest_framework.decorators import api_view
from ..models import Image
from .utils import (
_is_image,
_get_image_original_date,
)
@api_view(['POST'])
def post_image(request):
f_image = request.FILES['image']
if _is_image(f_image):
f_image.seek(0)
image_original_date = _get_image_original_date(f_image)
kwargs = {
'image_path': '',
'orig_time': image_original_date,
}
img = Image.objects.create(**kwargs)
f_image.seek(0)
django_q.tasks.async_task('api.tasks.upload_image', f_image.read(), settings.IMGUR_CLIENT_ID, img.id)
return JsonResponse({"token": img.id})
return HttpResponse(
"The uploaded file cannot be parsed to Image",
status=400,
)
|
[
"[email protected]"
] | |
ef69505a910f54c9b53a00a83f9f1824f0ec94ff
|
361060c2549af9ab271a1bf3fc6bbe2f5cdec827
|
/agents.py
|
84503442f169b3166c0c7f58fa573e53273881c6
|
[] |
no_license
|
We-Gold/Learning-Reinforcement-Learning
|
2f0d9414b1f9a3694f00fc818120146481f5765a
|
fcb5ee36b83831843f4049e025b46e162d2410b4
|
refs/heads/master
| 2021-05-17T03:59:43.758600 | 2020-03-28T21:31:39 | 2020-03-28T21:31:39 | 250,612,013 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,687 |
py
|
import numpy as np
import torch as T
from deep_q_network import DeepQNetwork, DuelingDeepQNetwork
from replay_memory import ReplayBuffer
class Agent():
def __init__(self, gamma, epsilon, lr, n_actions, input_dims,
mem_size, batch_size, eps_min=0.01, eps_dec=5e-7,
replace=1000, algo=None, env_name=None, chkpt_dir='tmp/dqn'):
self.gamma = gamma
self.epsilon = epsilon
self.lr = lr
self.n_actions = n_actions
self.input_dims = input_dims
self.eps_min = eps_min
self.eps_dec = eps_dec
self.action_space = [i for i in range(n_actions)]
self.learn_step_counter = 0
self.batch_size = batch_size
self.replace_target_cnt = replace
self.algo = algo
self.env_name = env_name
self.chkpt_dir = chkpt_dir
self.memory = ReplayBuffer(mem_size, input_dims, n_actions)
def store_transition(self, state, action, reward, state_, done):
self.memory.store_transition(state, action, reward, state_, done)
def choose_action(self, observation):
raise NotImplementedError
def replace_target_network(self):
if self.learn_step_counter % self.replace_target_cnt == 0:
self.q_next.load_state_dict(self.q_eval.state_dict())
def decrement_epsilon(self):
self.epsilon = self.epsilon - self.eps_dec \
if self.epsilon > self.eps_min else self.eps_min
def sample_memory(self):
state, action, reward, new_state, done = \
self.memory.sample_buffer(self.batch_size)
states = T.tensor(state).to(self.q_eval.device)
rewards = T.tensor(reward).to(self.q_eval.device)
dones = T.tensor(done).to(self.q_eval.device)
actions = T.tensor(action).to(self.q_eval.device)
states_ = T.tensor(new_state).to(self.q_eval.device)
return states, actions, rewards, states_, dones
def learn(self):
raise NotImplementedError
def save_models(self):
self.q_eval.save_checkpoint()
self.q_next.save_checkpoint()
def load_models(self):
self.q_eval.load_checkpoint()
self.q_next.load_checkpoint()
class DQNAgent(Agent):
def __init__(self, *args, **kwargs):
super(DQNAgent, self).__init__(*args, **kwargs)
self.q_eval = DeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_eval',
chkpt_dir=self.chkpt_dir)
self.q_next = DeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_next',
chkpt_dir=self.chkpt_dir)
def choose_action(self, observation):
if np.random.random() > self.epsilon:
state = T.tensor([observation],dtype=T.float).to(self.q_eval.device)
actions = self.q_eval.forward(state)
action = T.argmax(actions).item()
else:
action = np.random.choice(self.action_space)
return action
def learn(self):
if self.memory.mem_cntr < self.batch_size:
return
self.q_eval.optimizer.zero_grad()
self.replace_target_network()
states, actions, rewards, states_, dones = self.sample_memory()
indices = np.arange(self.batch_size)
q_pred = self.q_eval.forward(states)[indices, actions]
q_next = self.q_next.forward(states_).max(dim=1)[0]
q_next[dones] = 0.0
q_target = rewards + self.gamma*q_next
loss = self.q_eval.loss(q_target, q_pred).to(self.q_eval.device)
loss.backward()
self.q_eval.optimizer.step()
self.learn_step_counter += 1
self.decrement_epsilon()
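# The DQN update above bootstraps toward q_target = r + gamma * max_a' Q_next(s', a'),
# with q_next zeroed on terminal states so only the immediate reward remains.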
class DDQNAgent(Agent):
def __init__(self, *args, **kwargs):
super(DDQNAgent, self).__init__(*args, **kwargs)
self.q_eval = DeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_eval',
chkpt_dir=self.chkpt_dir)
self.q_next = DeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_next',
chkpt_dir=self.chkpt_dir)
def choose_action(self, observation):
if np.random.random() > self.epsilon:
state = T.tensor([observation],dtype=T.float).to(self.q_eval.device)
actions = self.q_eval.forward(state)
action = T.argmax(actions).item()
else:
action = np.random.choice(self.action_space)
return action
def learn(self):
if self.memory.mem_cntr < self.batch_size:
return
self.q_eval.optimizer.zero_grad()
self.replace_target_network()
states, actions, rewards, states_, dones = self.sample_memory()
indices = np.arange(self.batch_size)
q_pred = self.q_eval.forward(states)[indices, actions]
q_next = self.q_next.forward(states_)
q_eval = self.q_eval.forward(states_)
max_actions = T.argmax(q_eval, dim=1)
q_next[dones] = 0.0
q_target = rewards + self.gamma*q_next[indices, max_actions]
loss = self.q_eval.loss(q_target, q_pred).to(self.q_eval.device)
loss.backward()
self.q_eval.optimizer.step()
self.learn_step_counter += 1
self.decrement_epsilon()
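# Double-DQN difference from plain DQN above: the online network (q_eval) selects
# max_actions on s', while the target network (q_next) evaluates them, which reduces
# the Q-value overestimation of the single-network max.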
class DuelingDQNAgent(Agent):
def __init__(self, *args, **kwargs):
super(DuelingDQNAgent, self).__init__(*args, **kwargs)
self.q_eval = DuelingDeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_eval',
chkpt_dir=self.chkpt_dir)
self.q_next = DuelingDeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_next',
chkpt_dir=self.chkpt_dir)
def choose_action(self, observation):
if np.random.random() > self.epsilon:
state = T.tensor([observation],dtype=T.float).to(self.q_eval.device)
_, advantage = self.q_eval.forward(state)
action = T.argmax(advantage).item()
else:
action = np.random.choice(self.action_space)
return action
def learn(self):
if self.memory.mem_cntr < self.batch_size:
return
self.q_eval.optimizer.zero_grad()
self.replace_target_network()
states, actions, rewards, states_, dones = self.sample_memory()
indices = np.arange(self.batch_size)
V_s, A_s = self.q_eval.forward(states)
V_s_, A_s_ = self.q_next.forward(states_)
q_pred = T.add(V_s,
(A_s - A_s.mean(dim=1, keepdim=True)))[indices, actions]
q_next = T.add(V_s_,
(A_s_ - A_s_.mean(dim=1, keepdim=True))).max(dim=1)[0]
q_next[dones] = 0.0
q_target = rewards + self.gamma*q_next
loss = self.q_eval.loss(q_target, q_pred).to(self.q_eval.device)
loss.backward()
self.q_eval.optimizer.step()
self.learn_step_counter += 1
self.decrement_epsilon()
class DuelingDDQNAgent(Agent):
def __init__(self, *args, **kwargs):
super(DuelingDDQNAgent, self).__init__(*args, **kwargs)
self.q_eval = DuelingDeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_eval',
chkpt_dir=self.chkpt_dir)
self.q_next = DuelingDeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=self.env_name+'_'+self.algo+'_q_next',
chkpt_dir=self.chkpt_dir)
def choose_action(self, observation):
if np.random.random() > self.epsilon:
state = T.tensor([observation],dtype=T.float).to(self.q_eval.device)
_, advantage = self.q_eval.forward(state)
action = T.argmax(advantage).item()
else:
action = np.random.choice(self.action_space)
return action
def learn(self):
if self.memory.mem_cntr < self.batch_size:
return
self.q_eval.optimizer.zero_grad()
self.replace_target_network()
states, actions, rewards, states_, dones = self.sample_memory()
indices = np.arange(self.batch_size)
V_s, A_s = self.q_eval.forward(states)
V_s_, A_s_ = self.q_next.forward(states_)
V_s_eval, A_s_eval = self.q_eval.forward(states_)
q_pred = T.add(V_s,
(A_s - A_s.mean(dim=1, keepdim=True)))[indices, actions]
q_next = T.add(V_s_, (A_s_ - A_s_.mean(dim=1, keepdim=True)))
q_eval = T.add(V_s_eval,
(A_s_eval - A_s_eval.mean(dim=1, keepdim=True)))
max_actions = T.argmax(q_eval, dim=1)
q_next[dones] = 0.0
q_target = rewards + self.gamma*q_next[indices, max_actions]
loss = self.q_eval.loss(q_target, q_pred).to(self.q_eval.device)
loss.backward()
self.q_eval.optimizer.step()
self.learn_step_counter += 1
self.decrement_epsilon()
|
[
"[email protected]"
] | |
1fcc0d061eba6fcca3fbf4798c9700327dedb7d9
|
c60e9a7b98862c3325cf74c60d7972faf5060872
|
/leetcode_14_longest_common_prefix.py
|
50def26841f79fdb89a5e05f134f93f9ca387f30
|
[] |
no_license
|
CharleXu/Leetcode
|
dd4bea4f96c486f85dd4efb846e769ebd05a84ed
|
3f8f954dce580119a741f638d59bdaa17f552223
|
refs/heads/master
| 2022-10-15T20:33:11.766045 | 2020-06-18T03:18:10 | 2020-06-18T03:18:10 | 266,402,592 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 463 |
py
|
# coding: utf-8
def longest_common_prefix(strs):
if not strs:
return ""
n = len(strs)
prefix = strs[0]
for i in range(1, n):
while strs[i].find(prefix) != 0:
prefix = prefix[0:len(prefix) - 1]
if not prefix:
return ""
return prefix
if __name__ == "__main__":
li = [["flower", "flow", "flight"], ["dog", "racecar", "car"]]
for l in li:
print(longest_common_prefix(l))
|
[
"[email protected]"
] | |
66c57158fb305eb8f86eb55de1176262d8aa01da
|
ea6aa89215c6c972f871764590f7c17bd74cd057
|
/blog/urls.py
|
a8f9552d84ad59a91595acba25366275d7d6a8c2
|
[] |
no_license
|
alperkarabayir/DjangoBlog
|
8624be05f4aadac74e6aa082911585244d12de88
|
fee3ee8990577b40071de98650cb3caeda6f6b3b
|
refs/heads/master
| 2022-04-05T21:20:39.927107 | 2020-02-11T07:51:15 | 2020-02-11T07:51:15 | 235,230,668 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,157 |
py
|
from django.urls import path, include
from . import views
from django.contrib import admin
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', views.PostListView.as_view(), name='blog-home'),
path('getallposts/', views.get_posts, name='get-all-posts'),
path('user/<str:username>', views.UserPostListView.as_view(), name='user-posts'),
path('topic/<str:topic_name>', views.TopicListView.as_view(), name='topic-posts'),
path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),
path('post/new/', views.PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/delete', views.PostDeleteView.as_view(), name='post-delete'),
path('search/', views.search, name='blog-search'),
path('result/', views.result, name='blog-result'),
path('admin/', admin.site.urls),
path('register/', views.register, name='register'),
path('profile/', views.profile, name='profile'),
path('login/', auth_views.LoginView.as_view(template_name='blog/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='blog/logout.html'), name='logout'),
]
|
[
"[email protected]"
] | |
a788ecad5cc912d6405ede696a2f16263c295b76
|
8126d1bc2afe0925a24fce039d0f02a3bd7acbae
|
/pytraj/c_action/__init__.py
|
de635542285540646e5470bb9b3a11a2de034598
|
[
"BSD-2-Clause"
] |
permissive
|
rafwiewiora/pytraj
|
54fb6fe07a754f65b865dd161f64c7af15fc3926
|
91a019ea406081ccf0043170cc64c48b4a5ea04a
|
refs/heads/master
| 2021-01-20T17:33:05.974254 | 2016-03-11T21:25:32 | 2016-03-11T21:25:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 229 |
py
|
""""""
from __future__ import absolute_import
from . import c_action
actionlist = []
for act in c_action.__dict__.keys():
if 'Action' in act:
actionlist.append(act)
__all__ = actionlist
__doc__ = "\n".join(__all__)
|
[
"[email protected]"
] | |
edf37de051d2c0a8266c5ce04add9a0748025046
|
d2e01f1b622e1f77a46a2f7cffd011e2fc2d21ad
|
/account_manage/models.py
|
2ab964262d7a3bf0e8d9291e3d52298caab44dc4
|
[] |
no_license
|
JohnCny/myOA
|
ddd304411deb1025c253deb9db585629933c6ab1
|
f2d6aa750cf48d2f3101937cf5f7f2cdf16b59e6
|
refs/heads/master
| 2020-05-18T12:39:02.364827 | 2013-01-29T09:19:48 | 2013-01-29T09:19:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,526 |
py
|
# -*- coding: utf8 -*-
from django.db import models
import datetime
from django.contrib import admin
from system_avalibe.models import my_user
from system_avalibe.models import department
from system_avalibe.models import my_account_type
# Create your models here.
class account (models.Model):
user_id=models.ForeignKey(my_user,related_name='user_id',verbose_name="用户")
department=models.ForeignKey(department,verbose_name="所属部门")
type_id=models.ForeignKey(my_account_type,verbose_name="费用类型")
amount=models.IntegerField(verbose_name="金额")
beg_date=models.DateField(default=datetime.datetime.now,verbose_name="费用开始日期")
modify_date=models.DateField(default=datetime.datetime.now,verbose_name="修改日期")
end_date=models.DateField(default=datetime.datetime.now,verbose_name="费用结束日期")
status=models.IntegerField(default=0,verbose_name="当前状态")
approver=models.ForeignKey(my_user,related_name="approver",verbose_name="审核人")
level=models.IntegerField(default=0,verbose_name="级别")
is_paid=models.IntegerField(default=0,verbose_name="是否付款")
pay_date=models.DateField(default=None,verbose_name="付款日期")
company=models.IntegerField(default=0,verbose_name="公司")
def __unicode__(self):
return str(self.amount)
class Meta:
ordering = ['beg_date']
class account_show(admin.ModelAdmin):
list_display=('user_id','department','beg_date')
admin.site.register(account)
|
[
"[email protected]"
] | |
d5819b2f4f33d1ec6a077546f64c2b6e4b92968b
|
8620d98b00cf0a9f60415408bf82184efd20431a
|
/Codewars/Remove the parentheses.py
|
4942b2db2d2619ef5572685d2c498a9e0f1200bf
|
[] |
no_license
|
SA-Inc/Contest-Tasks
|
628aa4028bb5e3e5efc519c1369f5c95f4b46eff
|
dfffaec7d93fe217f19d532a3c5c799103f2a06d
|
refs/heads/main
| 2023-02-26T21:56:54.067172 | 2021-02-09T07:23:26 | 2021-02-09T07:23:26 | 323,820,065 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,031 |
py
|
# https://www.codewars.com/kata/5f7c38eb54307c002a2b8cc8
def remove_parentheses(s):
# res = ''
# startIdx = None
# endIdx = None
# for i in range(len(s)):
# if s[i] == '(':
# startIdx = i
# break
# for i in reversed(range(len(s))):
# if s[i] == ')':
# endIdx = i
# break
# for i in range(len(s)):
# if i >= startIdx and i <= endIdx:
# continue
# else:
# res += s[i]
# return res
# isSkip = False
# for i in range(len(s)):
# if s[i] == '(':
# isSkip = True
# if s[i] == ')':
# isSkip = False
# continue
# if isSkip == False:
# res += s[i]
# return res
parenthesesCount = 0
res = ''
for i in s:
if i == '(':
parenthesesCount += 1
elif i == ')':
parenthesesCount -= 1
else:
if parenthesesCount == 0:
res += i
return res
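# Illustration: remove_parentheses("a(b)c(d)e") returns "ace", and the nested input
# "a(b(c)d)e" returns "ae", since characters are kept only while the open-parenthesis
# counter is zero.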
|
[
"[email protected]"
] | |
1156225b525b0daf2c435e1d40ee14ff99c2bcf8
|
e255d29a94cfc6e8b79284f924083537ea495e09
|
/icinga2api/events.py
|
dd4724245f0765d2f82b51a2c03de8763fb66357
|
[
"BSD-2-Clause"
] |
permissive
|
fmnisme/python-icinga2api
|
570f2c57dcce6d9bba29e04cd7fca137c1f513cd
|
2913be2104f1218015d25a97bf62ae38b4a477bf
|
refs/heads/master
| 2021-01-10T11:42:21.059561 | 2020-06-04T17:51:43 | 2020-06-04T17:51:43 | 46,486,304 | 39 | 33 |
BSD-2-Clause
| 2020-12-14T19:23:44 | 2015-11-19T10:52:03 |
Python
|
UTF-8
|
Python
| false | false | 2,796 |
py
|
# -*- coding: utf-8 -*-
'''
Copyright 2017 [email protected]
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Icinga 2 API events
'''
from __future__ import print_function
import logging
from icinga2api.base import Base
LOG = logging.getLogger(__name__)
class Events(Base):
'''
Icinga 2 API events class
'''
base_url_path = 'v1/events'
def subscribe(self,
types,
queue,
filters=None,
filter_vars=None):
'''
subscribe to an event stream
example 1:
types = ["CheckResult"]
queue = "monitor"
filters = "event.check_result.exit_status==2"
for event in subscribe(types, queue, filters):
print event
:param types: the event types to return
:type types: array
:param queue: the queue name to subscribe to
:type queue: string
:param filters: filters matched object(s)
:type filters: string
:param filter_vars: variables used in the filters expression
:type filter_vars: dict
:returns: the events
:rtype: string
'''
payload = {
"types": types,
"queue": queue,
}
if filters:
payload["filter"] = filters
if filter_vars:
payload["filter_vars"] = filter_vars
stream = self._request(
'POST',
self.base_url_path,
payload,
stream=True
)
for event in self._get_message_from_stream(stream):
yield event
|
[
"[email protected]"
] | |
c3db96eb2731a8edb48136484e89885480f85bca
|
29b96a3ea850851bcb4d485be421af7ea801b071
|
/example_submission/controller.py
|
616bcb706d6aa3d6ef46a4ebd1a2fbafbcf09f86
|
[
"MIT"
] |
permissive
|
SambhawDrag/LBD-neurIPS-2021
|
b13b73070422762fe0f95b681c466373296e7178
|
fb39d2b6675e6d05175e2c66d5b7c667eb3db21d
|
refs/heads/main
| 2023-07-18T11:55:36.180789 | 2021-08-27T01:05:28 | 2021-08-27T01:05:28 | 388,462,641 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,739 |
py
|
"""
Controller template.
"""
import numpy as np
class controller(object):
def __init__(self, system, d_control):
"""
Entry point, called once when starting the controller for a newly
initialized system.
Input:
system - holds the identifying system name;
all evaluation systems have corresponding training data;
you may use this name to instantiate system specific
controllers
d_control - indicates the control input dimension
"""
self.system = system
self.d_control = d_control
def get_input(self, state, position, target):
"""
This function is called at each time step and expects the next
control input to apply to the system as return value.
Input: (all column vectors, if default wrapcontroller.py is used)
state - vector representing the current state of the system;
by convention the first two entries always correspond
to the end effectors X and Y coordinate;
the state variables are in the same order as in the
corresponding training data for the current system
with name self.system
position - vector of length two representing the X and Y
coordinates of the current position
target - vector of length two representing the X and Y
coordinates of the next steps target position
"""
# placeholder that just returns a next control input of correct shape
inp = np.random.randn(self.d_control, 2).dot(target - position)
return inp
|
[
"[email protected]"
] | |
6f0edb16dc41d65e86230d1a1fe1aa409e5680f9
|
f63f56fd03fd1474a5710f27fbe3b27a943d675a
|
/app/email.py
|
2b2ac97aa90a73cb081ef1a73cee2fbd7c56f6ca
|
[
"MIT"
] |
permissive
|
LewisNjagi/blog-website
|
e4cbe5681d14c3fa4543ac95f625937e9d85a610
|
e1f1d95ddd53a924988bfe669a4946b15dc33ac2
|
refs/heads/master
| 2023-03-19T00:01:53.913791 | 2021-03-09T15:16:30 | 2021-03-09T15:16:30 | 344,771,449 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 383 |
py
|
from flask_mail import Message
from flask import render_template
from . import mail
sender_email = '[email protected]'
def mail_message(subject,template,to,**kwargs):
email = Message(subject, sender=sender_email, recipients=[to])
email.body= render_template(template + ".txt",**kwargs)
email.html = render_template(template + ".html",**kwargs)
mail.send(email)
|
[
"[email protected]"
] | |
0dfba7d155244f359607b1677e3e305b9ded986b
|
12094b02c411f986e8f66f1f971f8e3ae99a8167
|
/OpenControl/classiccontrol/utils.py
|
5611e7c4dc74a883814da33db0c15aa2a3e05972
|
[
"MIT"
] |
permissive
|
masbudisulaksono/OpenControl
|
f8fcbe63290a0011f3ea7412a0200ca9f8913ec9
|
0087408c57bc77f34f524b28f8c4363b116700bb
|
refs/heads/master
| 2023-06-22T09:57:54.579445 | 2021-07-16T05:10:59 | 2021-07-16T05:10:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 993 |
py
|
import numpy as np
import random
import math
import scipy.linalg
def is_definite_positive_matrix(M):
"""Determine a maxtrix if
Args:
M : 2D-ndarray
Returns:
[Boolean]: True if the matrix is positive definite (symmetric with all-positive eigenvalues)
"""
condition1 = np.all(np.linalg.eigvals(M) > 0)
condition2 = np.allclose(M,M.T)
return condition1 and condition2
def get_coefficient(s):
"""Finding Coefficient of a term in Polynomial with predefine solution
Args:
s (list): solution of Polynomial
Returns:
np.ndarray: [description]
"""
len_ = len(s)
A = np.zeros((len_,len_))
b = np.zeros((len_,1))
for i in range(len_):
giatri = random.randint(0,100)
tich = 1
for index,nghiem in enumerate(s):
tich = tich*(giatri-nghiem)
tich = tich - math.pow(giatri,len_)
for j in range(len_):
A[i,j] = math.pow(giatri,j)
b[i][0]=tich
x = scipy.linalg.pinv(A)@b
return x
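# Illustration: get_coefficient([1, 2]) solves for the non-leading coefficients of
# (x - 1)(x - 2) = x**2 - 3*x + 2, returning approximately [[2.], [-3.]]
# (row j holds the coefficient of x**j).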
|
[
"[email protected]"
] | |
cfa8945289850ff63e497fcc908de2732efb4faf
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/battle/event/hunter_respawn.py
|
d81a741d398ce19a72f4ca18421e45b81afc015c
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 |
Python
|
UTF-8
|
Python
| false | false | 1,419 |
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/event/hunter_respawn.py
import BigWorld
from gui.Scaleform.daapi.view.battle.event.boss_teleport import EventBossTeleportView
from gui.Scaleform.daapi.view.meta.EventHunterRespawnViewMeta import EventHunterRespawnViewMeta
from gui.wt_event.wt_event_helpers import getSpeed
from gui.impl import backport
from gui.impl.gen import R
from gui.shared.gui_items.Vehicle import getIconResourceName
class EventHunterRespawnView(EventBossTeleportView, EventHunterRespawnViewMeta):
def onRespawnPointClick(self, pointGuid):
self._chooseSpawnPoint(pointGuid)
def showSpawnPoints(self):
self._blur.enable()
timeLeft = 0
timeTotal = 0
respawnComponent = BigWorld.player().dynamicComponents.get('respawnComponent')
if respawnComponent:
timeLeft = respawnComponent.endTime - BigWorld.serverTime()
timeTotal = respawnComponent.duration
self.as_updateTimerS(timeLeft, timeTotal, replaySpeed=getSpeed())
vTypeVO = self._sessionProvider.getCtx().getVehicleInfo(BigWorld.player().playerVehicleID).vehicleType
iconName = getIconResourceName(vTypeVO.iconName)
icon = R.images.gui.maps.icons.wtevent.hunterRespawn.dyn(iconName)
if icon.exists():
self.as_setIconS(backport.image(icon()))
|
[
"[email protected]"
] | |
a12758348fbc7e7296b3a997c889bf29e1f5be19
|
3bba01146505eaa9742f665612e7c31910b02daf
|
/api/services/AccountService.py
|
11de57d69b42fea5899189a806dc99433752ee20
|
[] |
no_license
|
leafunes/yaas
|
3f1901e99e63a7eaaacfd0dd0cd58394d30c755c
|
6d89a16c80b3e6b1259f0c9718dff6d7a76297e3
|
refs/heads/master
| 2023-01-04T21:14:52.736085 | 2020-11-02T03:36:15 | 2020-11-02T03:36:15 | 309,244,904 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,041 |
py
|
from model.Transaction import Transaction
from data.OnMemory import OnMemory
from errors.errors import NegativeTotalError, TransactionNotFoundError
class AccountService():
def __init__(self):
self.db = OnMemory() #TODO: parametrize
def create_credit(self, amount, description):
tr = Transaction("credit", amount, description)
return self.db.save_transaction(tr)
def create_debit(self, amount, description):
account_amount = self.get_account_summary()
if account_amount - amount < 0:
raise NegativeTotalError(amount, account_amount)
tr = Transaction("debit", amount, description)
return self.db.save_transaction(tr)
def get_account_summary(self):
return self.db.get_account_summary()
def get_transactions(self):
return self.db.get_all_transactions()
def get_transaction_by_id(self, id):
tr = self.db.get_transaction_by_id(id)
if tr is None:
raise TransactionNotFoundError(id)
return tr
|
[
"[email protected]"
] | |
1e931754ef5271d1566e68b855c1e6c2ee55230b
|
7de32eb8b115845a4d396785e1928b7af272ca5b
|
/testing_servo/servo_DS3218.py
|
7e123e3db65919999b3663b3e3a5e716154d45ed
|
[] |
no_license
|
ccrreeaattoorr/carwash
|
3aaddaab6f50534cbcfbc9a342254ba77e14a929
|
72a4657b756546314d92b3936046f061acc6cc78
|
refs/heads/master
| 2021-07-16T20:52:50.824147 | 2020-10-11T18:55:45 | 2020-10-11T18:55:45 | 217,760,310 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 28,679 |
py
|
import RPi.GPIO
import random
import serial
import struct
import time
class Roboclaw:
'Roboclaw Interface Class'
def __init__(self, comport, rate, timeout=0.01, retries=3):
self.comport = comport
self.rate = rate
self.timeout = timeout
self._trystimeout = retries
self._crc = 0
#Command Enums
class Cmd():
M1FORWARD = 0
M1BACKWARD = 1
SETMINMB = 2
SETMAXMB = 3
M2FORWARD = 4
M2BACKWARD = 5
M17BIT = 6
M27BIT = 7
MIXEDFORWARD = 8
MIXEDBACKWARD = 9
MIXEDRIGHT = 10
MIXEDLEFT = 11
MIXEDFB = 12
MIXEDLR = 13
GETM1ENC = 16
GETM2ENC = 17
GETM1SPEED = 18
GETM2SPEED = 19
RESETENC = 20
GETVERSION = 21
SETM1ENCCOUNT = 22
SETM2ENCCOUNT = 23
GETMBATT = 24
GETLBATT = 25
SETMINLB = 26
SETMAXLB = 27
SETM1PID = 28
SETM2PID = 29
GETM1ISPEED = 30
GETM2ISPEED = 31
M1DUTY = 32
M2DUTY = 33
MIXEDDUTY = 34
M1SPEED = 35
M2SPEED = 36
MIXEDSPEED = 37
M1SPEEDACCEL = 38
M2SPEEDACCEL = 39
MIXEDSPEEDACCEL = 40
M1SPEEDDIST = 41
M2SPEEDDIST = 42
MIXEDSPEEDDIST = 43
M1SPEEDACCELDIST = 44
M2SPEEDACCELDIST = 45
MIXEDSPEEDACCELDIST = 46
GETBUFFERS = 47
GETPWMS = 48
GETCURRENTS = 49
MIXEDSPEED2ACCEL = 50
MIXEDSPEED2ACCELDIST = 51
M1DUTYACCEL = 52
M2DUTYACCEL = 53
MIXEDDUTYACCEL = 54
READM1PID = 55
READM2PID = 56
SETMAINVOLTAGES = 57
SETLOGICVOLTAGES = 58
GETMINMAXMAINVOLTAGES = 59
GETMINMAXLOGICVOLTAGES = 60
SETM1POSPID = 61
SETM2POSPID = 62
READM1POSPID = 63
READM2POSPID = 64
M1SPEEDACCELDECCELPOS = 65
M2SPEEDACCELDECCELPOS = 66
MIXEDSPEEDACCELDECCELPOS = 67
SETM1DEFAULTACCEL = 68
SETM2DEFAULTACCEL = 69
SETPINFUNCTIONS = 74
GETPINFUNCTIONS = 75
SETDEADBAND = 76
GETDEADBAND = 77
RESTOREDEFAULTS = 80
GETTEMP = 82
GETTEMP2 = 83
GETERROR = 90
GETENCODERMODE = 91
SETM1ENCODERMODE = 92
SETM2ENCODERMODE = 93
WRITENVM = 94
READNVM = 95
SETCONFIG = 98
GETCONFIG = 99
SETM1MAXCURRENT = 133
SETM2MAXCURRENT = 134
GETM1MAXCURRENT = 135
GETM2MAXCURRENT = 136
SETPWMMODE = 148
GETPWMMODE = 149
READEEPROM = 252
WRITEEEPROM = 253
FLAGBOOTLOADER = 255
#Private Functions
def crc_clear(self):
self._crc = 0
return
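# crc_update below implements CRC-16 with polynomial 0x1021 and initial value 0
# (CCITT/XMODEM style), the checksum used by the Roboclaw packet-serial protocol.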
def crc_update(self,data):
self._crc = self._crc ^ (data << 8)
for bit in range(0, 8):
if (self._crc&0x8000) == 0x8000:
self._crc = ((self._crc << 1) ^ 0x1021)
else:
self._crc = self._crc << 1
return
def _sendcommand(self,address,command):
self.crc_clear()
self.crc_update(address)
# self._port.write(chr(address))
self._port.write(address.to_bytes(1, 'big'))
self.crc_update(command)
# self._port.write(chr(command))
self._port.write(command.to_bytes(1, 'big'))
return
def _readchecksumword(self):
data = self._port.read(2)
if len(data)==2:
# crc = (ord(data[0])<<8) | ord(data[1])
crc = (data[0]<<8) | data[1]
return (1,crc)
return (0,0)
def _readbyte(self):
data = self._port.read(1)
if len(data):
val = ord(data)
self.crc_update(val)
return (1,val)
return (0,0)
def _readword(self):
val1 = self._readbyte()
if val1[0]:
val2 = self._readbyte()
if val2[0]:
return (1,val1[1]<<8|val2[1])
return (0,0)
def _readlong(self):
val1 = self._readbyte()
if val1[0]:
val2 = self._readbyte()
if val2[0]:
val3 = self._readbyte()
if val3[0]:
val4 = self._readbyte()
if val4[0]:
return (1,val1[1]<<24|val2[1]<<16|val3[1]<<8|val4[1])
return (0,0)
def _readslong(self):
val = self._readlong()
if val[0]:
if val[1]&0x80000000:
return (val[0],val[1]-0x100000000)
return (val[0],val[1])
return (0,0)
def _writebyte(self,val):
self.crc_update(val&0xFF)
# self._port.write(chr(val&0xFF))
self._port.write(val.to_bytes(1, 'big'))
def _writesbyte(self,val):
self._writebyte(val)
def _writeword(self,val):
self._writebyte((val>>8)&0xFF)
self._writebyte(val&0xFF)
def _writesword(self,val):
self._writeword(val)
def _writelong(self,val):
self._writebyte((val>>24)&0xFF)
self._writebyte((val>>16)&0xFF)
self._writebyte((val>>8)&0xFF)
self._writebyte(val&0xFF)
def _writeslong(self,val):
self._writelong(val)
def _read1(self,address,cmd):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address,cmd)
val1 = self._readbyte()
if val1[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF!=crc[1]&0xFFFF:
return (0,0)
return (1,val1[1])
trys-=1
if trys==0:
break
return (0,0)
def _read2(self,address,cmd):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address,cmd)
val1 = self._readword()
if val1[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF!=crc[1]&0xFFFF:
return (0,0)
return (1,val1[1])
trys-=1
if trys==0:
break
return (0,0)
def _read4(self,address,cmd):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address,cmd)
val1 = self._readlong()
if val1[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF!=crc[1]&0xFFFF:
return (0,0)
return (1,val1[1])
trys-=1
if trys==0:
break
return (0,0)
def _read4_1(self,address,cmd):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address,cmd)
val1 = self._readslong()
if val1[0]:
val2 = self._readbyte()
if val2[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF!=crc[1]&0xFFFF:
return (0,0)
return (1,val1[1],val2[1])
trys-=1
if trys==0:
break
return (0,0)
def _read_n(self,address,cmd,args):
trys = self._trystimeout
while 1:
self._port.flushInput()
trys-=1
if trys==0:
break
failed=False
self._sendcommand(address,cmd)
data = [1,]
for i in range(0,args):
val = self._readlong()
if val[0]==0:
failed=True
break
data.append(val[1])
if failed:
continue
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF==crc[1]&0xFFFF:
return (data);
return (0,0,0,0,0)
def _writechecksum(self):
self._writeword(self._crc&0xFFFF)
val = self._readbyte()
if(len(val)>0):
if val[0]:
return True
return False
def _write0(self,address,cmd):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
if self._writechecksum():
return True
trys=trys-1
return False
def _write1(self,address,cmd,val):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writebyte(val)
if self._writechecksum():
return True
trys=trys-1
return False
def _write11(self,address,cmd,val1,val2):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writebyte(val1)
self._writebyte(val2)
if self._writechecksum():
return True
trys=trys-1
return False
def _write111(self,address,cmd,val1,val2,val3):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writebyte(val1)
self._writebyte(val2)
self._writebyte(val3)
if self._writechecksum():
return True
trys=trys-1
return False
def _write2(self,address,cmd,val):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writeword(val)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS2(self,address,cmd,val):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writesword(val)
if self._writechecksum():
return True
trys=trys-1
return False
def _write22(self,address,cmd,val1,val2):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writeword(val1)
self._writeword(val2)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS22(self,address,cmd,val1,val2):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writesword(val1)
self._writeword(val2)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS2S2(self,address,cmd,val1,val2):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writesword(val1)
self._writesword(val2)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS24(self,address,cmd,val1,val2):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writesword(val1)
self._writelong(val2)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS24S24(self,address,cmd,val1,val2,val3,val4):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writesword(val1)
self._writelong(val2)
self._writesword(val3)
self._writelong(val4)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4(self,address,cmd,val):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS4(self,address,cmd,val):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writeslong(val)
if self._writechecksum():
return True
trys=trys-1
return False
def _write44(self,address,cmd,val1,val2):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writelong(val2)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4S4(self,address,cmd,val1,val2):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writeslong(val2)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS4S4(self,address,cmd,val1,val2):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writeslong(val1)
self._writeslong(val2)
if self._writechecksum():
return True
trys=trys-1
return False
def _write441(self,address,cmd,val1,val2,val3):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writelong(val2)
self._writebyte(val3)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS441(self,address,cmd,val1,val2,val3):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writeslong(val1)
self._writelong(val2)
self._writebyte(val3)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4S4S4(self,address,cmd,val1,val2,val3):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writeslong(val2)
self._writeslong(val3)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4S441(self,address,cmd,val1,val2,val3,val4):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writeslong(val2)
self._writelong(val3)
self._writebyte(val4)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4444(self,address,cmd,val1,val2,val3,val4):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writelong(val2)
self._writelong(val3)
self._writelong(val4)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4S44S4(self,address,cmd,val1,val2,val3,val4):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writeslong(val2)
self._writelong(val3)
self._writeslong(val4)
if self._writechecksum():
return True
trys=trys-1
return False
def _write44441(self,address,cmd,val1,val2,val3,val4,val5):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writelong(val2)
self._writelong(val3)
self._writelong(val4)
self._writebyte(val5)
if self._writechecksum():
return True
trys=trys-1
return False
def _writeS44S441(self,address,cmd,val1,val2,val3,val4,val5):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writeslong(val1)
self._writelong(val2)
self._writeslong(val3)
self._writelong(val4)
self._writebyte(val5)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4S44S441(self,address,cmd,val1,val2,val3,val4,val5,val6):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writeslong(val2)
self._writelong(val3)
self._writeslong(val4)
self._writelong(val5)
self._writebyte(val6)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4S444S441(self,address,cmd,val1,val2,val3,val4,val5,val6,val7):
trys=self._trystimeout
while trys:
			self._sendcommand(address,cmd)
self._writelong(val1)
self._writeslong(val2)
self._writelong(val3)
self._writelong(val4)
self._writeslong(val5)
self._writelong(val6)
self._writebyte(val7)
if self._writechecksum():
return True
trys=trys-1
return False
def _write4444444(self,address,cmd,val1,val2,val3,val4,val5,val6,val7):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writelong(val2)
self._writelong(val3)
self._writelong(val4)
self._writelong(val5)
self._writelong(val6)
self._writelong(val7)
if self._writechecksum():
return True
trys=trys-1
return False
def _write444444441(self,address,cmd,val1,val2,val3,val4,val5,val6,val7,val8,val9):
trys=self._trystimeout
while trys:
self._sendcommand(address,cmd)
self._writelong(val1)
self._writelong(val2)
self._writelong(val3)
self._writelong(val4)
self._writelong(val5)
self._writelong(val6)
self._writelong(val7)
self._writelong(val8)
self._writebyte(val9)
if self._writechecksum():
return True
trys=trys-1
return False
#User accessible functions
def SendRandomData(self,cnt):
for i in range(0,cnt):
byte = random.getrandbits(8)
# self._port.write(chr(byte))
self._port.write(byte.to_bytes(1, 'big'))
return
def ForwardM1(self,address,val):
return self._write1(address,self.Cmd.M1FORWARD,val)
def BackwardM1(self,address,val):
return self._write1(address,self.Cmd.M1BACKWARD,val)
def SetMinVoltageMainBattery(self,address,val):
return self._write1(address,self.Cmd.SETMINMB,val)
def SetMaxVoltageMainBattery(self,address,val):
return self._write1(address,self.Cmd.SETMAXMB,val)
def ForwardM2(self,address,val):
return self._write1(address,self.Cmd.M2FORWARD,val)
def BackwardM2(self,address,val):
return self._write1(address,self.Cmd.M2BACKWARD,val)
def ForwardBackwardM1(self,address,val):
return self._write1(address,self.Cmd.M17BIT,val)
def ForwardBackwardM2(self,address,val):
return self._write1(address,self.Cmd.M27BIT,val)
def ForwardMixed(self,address,val):
return self._write1(address,self.Cmd.MIXEDFORWARD,val)
def BackwardMixed(self,address,val):
return self._write1(address,self.Cmd.MIXEDBACKWARD,val)
def TurnRightMixed(self,address,val):
return self._write1(address,self.Cmd.MIXEDRIGHT,val)
def TurnLeftMixed(self,address,val):
return self._write1(address,self.Cmd.MIXEDLEFT,val)
def ForwardBackwardMixed(self,address,val):
return self._write1(address,self.Cmd.MIXEDFB,val)
def LeftRightMixed(self,address,val):
return self._write1(address,self.Cmd.MIXEDLR,val)
def ReadEncM1(self,address):
return self._read4_1(address,self.Cmd.GETM1ENC)
def ReadEncM2(self,address):
return self._read4_1(address,self.Cmd.GETM2ENC)
def ReadSpeedM1(self,address):
return self._read4_1(address,self.Cmd.GETM1SPEED)
def ReadSpeedM2(self,address):
return self._read4_1(address,self.Cmd.GETM2SPEED)
def ResetEncoders(self,address):
return self._write0(address,self.Cmd.RESETENC)
def ReadVersion(self,address):
trys=self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address,self.Cmd.GETVERSION)
str = ""
passed = True
for i in range(0,48):
data = self._port.read(1)
if len(data):
val = ord(data)
self.crc_update(val)
if(val==0):
break
# str+=data[0]
str+=chr(data[0])
else:
passed = False
break
if passed:
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF==crc[1]&0xFFFF:
return (1,str)
else:
time.sleep(0.01)
trys-=1
if trys==0:
break
return (0,0)
def SetEncM1(self,address,cnt):
return self._write4(address,self.Cmd.SETM1ENCCOUNT,cnt)
def SetEncM2(self,address,cnt):
return self._write4(address,self.Cmd.SETM2ENCCOUNT,cnt)
def ReadMainBatteryVoltage(self,address):
return self._read2(address,self.Cmd.GETMBATT)
def ReadLogicBatteryVoltage(self,address,):
return self._read2(address,self.Cmd.GETLBATT)
def SetMinVoltageLogicBattery(self,address,val):
return self._write1(address,self.Cmd.SETMINLB,val)
def SetMaxVoltageLogicBattery(self,address,val):
return self._write1(address,self.Cmd.SETMAXLB,val)
def SetM1VelocityPID(self,address,p,i,d,qpps):
# return self._write4444(address,self.Cmd.SETM1PID,long(d*65536),long(p*65536),long(i*65536),qpps)
		return self._write4444(address,self.Cmd.SETM1PID,int(d*65536),int(p*65536),int(i*65536),qpps)
def SetM2VelocityPID(self,address,p,i,d,qpps):
# return self._write4444(address,self.Cmd.SETM2PID,long(d*65536),long(p*65536),long(i*65536),qpps)
		return self._write4444(address,self.Cmd.SETM2PID,int(d*65536),int(p*65536),int(i*65536),qpps)
def ReadISpeedM1(self,address):
return self._read4_1(address,self.Cmd.GETM1ISPEED)
def ReadISpeedM2(self,address):
return self._read4_1(address,self.Cmd.GETM2ISPEED)
def DutyM1(self,address,val):
return self._writeS2(address,self.Cmd.M1DUTY,val)
def DutyM2(self,address,val):
return self._writeS2(address,self.Cmd.M2DUTY,val)
def DutyM1M2(self,address,m1,m2):
return self._writeS2S2(address,self.Cmd.MIXEDDUTY,m1,m2)
def SpeedM1(self,address,val):
return self._writeS4(address,self.Cmd.M1SPEED,val)
def SpeedM2(self,address,val):
return self._writeS4(address,self.Cmd.M2SPEED,val)
def SpeedM1M2(self,address,m1,m2):
return self._writeS4S4(address,self.Cmd.MIXEDSPEED,m1,m2)
def SpeedAccelM1(self,address,accel,speed):
return self._write4S4(address,self.Cmd.M1SPEEDACCEL,accel,speed)
def SpeedAccelM2(self,address,accel,speed):
return self._write4S4(address,self.Cmd.M2SPEEDACCEL,accel,speed)
def SpeedAccelM1M2(self,address,accel,speed1,speed2):
return self._write4S4S4(address,self.Cmd.MIXEDSPEEDACCEL,accel,speed1,speed2)
def SpeedDistanceM1(self,address,speed,distance,buffer):
return self._writeS441(address,self.Cmd.M1SPEEDDIST,speed,distance,buffer)
def SpeedDistanceM2(self,address,speed,distance,buffer):
return self._writeS441(address,self.Cmd.M2SPEEDDIST,speed,distance,buffer)
def SpeedDistanceM1M2(self,address,speed1,distance1,speed2,distance2,buffer):
return self._writeS44S441(address,self.Cmd.MIXEDSPEEDDIST,speed1,distance1,speed2,distance2,buffer)
def SpeedAccelDistanceM1(self,address,accel,speed,distance,buffer):
return self._write4S441(address,self.Cmd.M1SPEEDACCELDIST,accel,speed,distance,buffer)
def SpeedAccelDistanceM2(self,address,accel,speed,distance,buffer):
return self._write4S441(address,self.Cmd.M2SPEEDACCELDIST,accel,speed,distance,buffer)
def SpeedAccelDistanceM1M2(self,address,accel,speed1,distance1,speed2,distance2,buffer):
return self._write4S44S441(address,self.Cmd.MIXEDSPEEDACCELDIST,accel,speed1,distance1,speed2,distance2,buffer)
def ReadBuffers(self,address):
val = self._read2(address,self.Cmd.GETBUFFERS)
if val[0]:
return (1,val[1]>>8,val[1]&0xFF)
return (0,0,0)
def ReadPWMs(self,address):
val = self._read4(address,self.Cmd.GETPWMS)
if val[0]:
pwm1 = val[1]>>16
pwm2 = val[1]&0xFFFF
if pwm1&0x8000:
pwm1-=0x10000
if pwm2&0x8000:
pwm2-=0x10000
return (1,pwm1,pwm2)
return (0,0,0)
def ReadCurrents(self,address):
val = self._read4(address,self.Cmd.GETCURRENTS)
if val[0]:
cur1 = val[1]>>16
cur2 = val[1]&0xFFFF
if cur1&0x8000:
cur1-=0x10000
if cur2&0x8000:
cur2-=0x10000
return (1,cur1,cur2)
return (0,0,0)
def SpeedAccelM1M2_2(self,address,accel1,speed1,accel2,speed2):
		return self._write4S44S4(address,self.Cmd.MIXEDSPEED2ACCEL,accel1,speed1,accel2,speed2)
def SpeedAccelDistanceM1M2_2(self,address,accel1,speed1,distance1,accel2,speed2,distance2,buffer):
return self._write4S444S441(address,self.Cmd.MIXEDSPEED2ACCELDIST,accel1,speed1,distance1,accel2,speed2,distance2,buffer)
def DutyAccelM1(self,address,accel,duty):
return self._writeS24(address,self.Cmd.M1DUTYACCEL,duty,accel)
def DutyAccelM2(self,address,accel,duty):
return self._writeS24(address,self.Cmd.M2DUTYACCEL,duty,accel)
def DutyAccelM1M2(self,address,accel1,duty1,accel2,duty2):
return self._writeS24S24(address,self.Cmd.MIXEDDUTYACCEL,duty1,accel1,duty2,accel2)
def ReadM1VelocityPID(self,address):
data = self._read_n(address,self.Cmd.READM1PID,4)
if data[0]:
data[1]/=65536.0
data[2]/=65536.0
data[3]/=65536.0
return data
return (0,0,0,0,0)
def ReadM2VelocityPID(self,address):
data = self._read_n(address,self.Cmd.READM2PID,4)
if data[0]:
data[1]/=65536.0
data[2]/=65536.0
data[3]/=65536.0
return data
return (0,0,0,0,0)
def SetMainVoltages(self,address,min, max):
return self._write22(address,self.Cmd.SETMAINVOLTAGES,min,max)
def SetLogicVoltages(self,address,min, max):
return self._write22(address,self.Cmd.SETLOGICVOLTAGES,min,max)
def ReadMinMaxMainVoltages(self,address):
val = self._read4(address,self.Cmd.GETMINMAXMAINVOLTAGES)
if val[0]:
min = val[1]>>16
max = val[1]&0xFFFF
return (1,min,max)
return (0,0,0)
def ReadMinMaxLogicVoltages(self,address):
val = self._read4(address,self.Cmd.GETMINMAXLOGICVOLTAGES)
if val[0]:
min = val[1]>>16
max = val[1]&0xFFFF
return (1,min,max)
return (0,0,0)
def SetM1PositionPID(self,address,kp,ki,kd,kimax,deadzone,min,max):
# return self._write4444444(address,self.Cmd.SETM1POSPID,long(kd*1024),long(kp*1024),long(ki*1024),kimax,deadzone,min,max)
		return self._write4444444(address,self.Cmd.SETM1POSPID,int(kd*1024),int(kp*1024),int(ki*1024),kimax,deadzone,min,max)
def SetM2PositionPID(self,address,kp,ki,kd,kimax,deadzone,min,max):
# return self._write4444444(address,self.Cmd.SETM2POSPID,long(kd*1024),long(kp*1024),long(ki*1024),kimax,deadzone,min,max)
		return self._write4444444(address,self.Cmd.SETM2POSPID,int(kd*1024),int(kp*1024),int(ki*1024),kimax,deadzone,min,max)
def ReadM1PositionPID(self,address):
data = self._read_n(address,self.Cmd.READM1POSPID,7)
if data[0]:
data[1]/=1024.0
data[2]/=1024.0
data[3]/=1024.0
return data
return (0,0,0,0,0,0,0,0)
def ReadM2PositionPID(self,address):
data = self._read_n(address,self.Cmd.READM2POSPID,7)
if data[0]:
data[1]/=1024.0
data[2]/=1024.0
data[3]/=1024.0
return data
return (0,0,0,0,0,0,0,0)
def SpeedAccelDeccelPositionM1(self,address,accel,speed,deccel,position,buffer):
return self._write44441(address,self.Cmd.M1SPEEDACCELDECCELPOS,accel,speed,deccel,position,buffer)
def SpeedAccelDeccelPositionM2(self,address,accel,speed,deccel,position,buffer):
return self._write44441(address,self.Cmd.M2SPEEDACCELDECCELPOS,accel,speed,deccel,position,buffer)
def SpeedAccelDeccelPositionM1M2(self,address,accel1,speed1,deccel1,position1,accel2,speed2,deccel2,position2,buffer):
return self._write444444441(address,self.Cmd.MIXEDSPEEDACCELDECCELPOS,accel1,speed1,deccel1,position1,accel2,speed2,deccel2,position2,buffer)
def SetM1DefaultAccel(self,address,accel):
return self._write4(address,self.Cmd.SETM1DEFAULTACCEL,accel)
def SetM2DefaultAccel(self,address,accel):
return self._write4(address,self.Cmd.SETM2DEFAULTACCEL,accel)
def SetPinFunctions(self,address,S3mode,S4mode,S5mode):
return self._write111(address,self.Cmd.SETPINFUNCTIONS,S3mode,S4mode,S5mode)
def ReadPinFunctions(self,address):
trys = self._trystimeout
while 1:
self._sendcommand(address,self.Cmd.GETPINFUNCTIONS)
val1 = self._readbyte()
if val1[0]:
val2 = self._readbyte()
				if val2[0]:
					val3 = self._readbyte()
					if val3[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF!=crc[1]&0xFFFF:
return (0,0)
return (1,val1[1],val2[1],val3[1])
trys-=1
if trys==0:
break
return (0,0)
def SetDeadBand(self,address,min,max):
return self._write11(address,self.Cmd.SETDEADBAND,min,max)
def GetDeadBand(self,address):
val = self._read2(address,self.Cmd.GETDEADBAND)
if val[0]:
return (1,val[1]>>8,val[1]&0xFF)
return (0,0,0)
#Warning(TTL Serial): Baudrate will change if not already set to 38400. Communications will be lost
def RestoreDefaults(self,address):
return self._write0(address,self.Cmd.RESTOREDEFAULTS)
def ReadTemp(self,address):
return self._read2(address,self.Cmd.GETTEMP)
def ReadTemp2(self,address):
return self._read2(address,self.Cmd.GETTEMP2)
def ReadError(self,address):
return self._read4(address,self.Cmd.GETERROR)
def ReadEncoderModes(self,address):
val = self._read2(address,self.Cmd.GETENCODERMODE)
if val[0]:
return (1,val[1]>>8,val[1]&0xFF)
return (0,0,0)
def SetM1EncoderMode(self,address,mode):
return self._write1(address,self.Cmd.SETM1ENCODERMODE,mode)
def SetM2EncoderMode(self,address,mode):
return self._write1(address,self.Cmd.SETM2ENCODERMODE,mode)
#saves active settings to NVM
def WriteNVM(self,address):
return self._write4(address,self.Cmd.WRITENVM,0xE22EAB7A)
#restores settings from NVM
#Warning(TTL Serial): If baudrate changes or the control mode changes communications will be lost
def ReadNVM(self,address):
return self._write0(address,self.Cmd.READNVM)
#Warning(TTL Serial): If control mode is changed from packet serial mode when setting config communications will be lost!
#Warning(TTL Serial): If baudrate of packet serial mode is changed communications will be lost!
def SetConfig(self,address,config):
return self._write2(address,self.Cmd.SETCONFIG,config)
def GetConfig(self,address):
return self._read2(address,self.Cmd.GETCONFIG)
def SetM1MaxCurrent(self,address,max):
return self._write44(address,self.Cmd.SETM1MAXCURRENT,max,0)
def SetM2MaxCurrent(self,address,max):
return self._write44(address,self.Cmd.SETM2MAXCURRENT,max,0)
def ReadM1MaxCurrent(self,address):
data = self._read_n(address,self.Cmd.GETM1MAXCURRENT,2)
if data[0]:
return (1,data[1])
return (0,0)
def ReadM2MaxCurrent(self,address):
data = self._read_n(address,self.Cmd.GETM2MAXCURRENT,2)
if data[0]:
return (1,data[1])
return (0,0)
def SetPWMMode(self,address,mode):
return self._write1(address,self.Cmd.SETPWMMODE,mode)
def ReadPWMMode(self,address):
return self._read1(address,self.Cmd.GETPWMMODE)
def ReadEeprom(self,address,ee_address):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address,self.Cmd.READEEPROM)
self.crc_update(ee_address)
			self._port.write(ee_address.to_bytes(1, 'big'))
val1 = self._readword()
if val1[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc&0xFFFF!=crc[1]&0xFFFF:
return (0,0)
return (1,val1[1])
trys-=1
if trys==0:
break
return (0,0)
def WriteEeprom(self,address,ee_address,ee_word):
retval = self._write111(address,self.Cmd.WRITEEEPROM,ee_address,ee_word>>8,ee_word&0xFF)
if retval==True:
trys = self._trystimeout
while 1:
self._port.flushInput()
val1 = self._readbyte()
if val1[0]:
if val1[1]==0xaa:
return True
trys-=1
if trys==0:
break
return False
def Open(self):
try:
self._port = serial.Serial(port=self.comport, baudrate=self.rate, timeout=1, interCharTimeout=self.timeout)
except:
return 0
return 1
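# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original driver). It assumes the class
# defined above is named Roboclaw and that its constructor takes the serial
# port name and baud rate, as in the Ion Motion Control reference library; the
# port path and the factory-default packet-serial address 0x80 are illustrative
# values only. The module-level time/serial imports of the original file are
# assumed to be present (Open and ReadVersion above already rely on them).
if __name__ == "__main__":
    rc = Roboclaw("/dev/ttyACM0", 38400)   # hypothetical port; adjust to your wiring
    if rc.Open():
        address = 0x80                     # default packet-serial address
        print(rc.ReadVersion(address))     # -> (1, "<firmware string>") on success
        rc.ForwardM1(address, 64)          # roughly half speed forward on motor 1
        time.sleep(2)
        rc.ForwardM1(address, 0)           # stop
        print(rc.ReadEncM1(address))       # -> (1, count, status) on success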
import RPi.GPIO as GPIO
import time
P_SERVO = 17 # adapt to your wiring
fPWM = 50 # Hz (not higher with software PWM)
a = 10
b = 2
def setup():
global pwm
GPIO.setmode(GPIO.BCM)
GPIO.setup(P_SERVO, GPIO.OUT)
pwm = GPIO.PWM(P_SERVO, fPWM)
pwm.start(0)
def setDirection(direction):
duty = a / 180 * direction + b
pwm.ChangeDutyCycle(duty)
print("direction =", direction, "-> duty =", duty)
time.sleep(1) # allow to settle
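# Worked example of the mapping above (illustrative, not from the original):
# with a = 10 and b = 2, direction 0 -> duty 2 %, 90 -> 7 %, 180 -> 12 %,
# i.e. pulses of roughly 0.4 ms to 2.4 ms within the 20 ms period at fPWM = 50 Hz.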
print("starting")
setup()
for direction in range(0, 181, 90):
setDirection(direction)
direction = 0
setDirection(0)
GPIO.cleanup()
print("done")
|
[
"[email protected]"
] | |
86dd568b43c382b6a37043b8cbb13b0a33717eda
|
a78be0e636b10d47128f6a12703a1f4b14562be5
|
/src/accounts/migrations/0004_remove_user_full_name.py
|
9573000ec68130afc1f0308bdcf50ef40740862e
|
[
"MIT"
] |
permissive
|
vykuntaharsha/onestopshop
|
7f0557408e8d4801dce371e25bdb0b305532e244
|
b9c4c9cbf603af87bee18fe20e04567ff70d42e1
|
refs/heads/master
| 2022-12-09T01:18:03.948132 | 2018-07-27T03:54:14 | 2018-07-27T03:54:14 | 131,765,577 | 1 | 0 |
MIT
| 2022-12-07T23:51:23 | 2018-05-01T21:34:32 |
Python
|
UTF-8
|
Python
| false | false | 391 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-12-10 03:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_user_full_name'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='full_name',
),
]
|
[
"[email protected]"
] | |
dd52e5517ce5f355698af46ebe674462ee81cba0
|
da1399f04b2542a2bb427df41ace68e9df9708da
|
/Level.py
|
e9933cbc3383c22d0b850e1c67326c7620acc61e
|
[] |
no_license
|
span/taxrunner
|
b436828005fc1ea157bcf1c787c08c361c6765c0
|
ff0861700121b2107334967378be3b18667c5d71
|
refs/heads/master
| 2016-09-06T02:10:05.808232 | 2015-02-10T16:45:35 | 2015-02-10T16:45:35 | 11,750,120 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,952 |
py
|
# This class contains the level sprites that we see on the screen. When this class is
# instantiated it draws a level of bricks, adds a player, taxman, background and cash. This
# class is then called from the main game loop through the update method. These calls are
# then propagated to the proper sprite classes as necessary.
#
# Author: Daniel Kvist
# E-mail: [email protected]
# Python version: 2.7
# OS: OS X
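#
# A rough sketch of how a main loop is expected to drive this class (purely
# illustrative; the Game object passed to the constructor and the event
# handling are assumptions, not part of this file):
#
#   level = Level(game)
#   while not level.finished:
#       up = any(e.type == KEYDOWN for e in pygame.event.get())
#       level.update(up)
#       level.draw()
#       pygame.display.flip()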
import pygame
from pygame.locals import *
from random import *
from Background import Background
from Brick import Brick
from Cash import Cash
from Player import Player
from TaxMan import TaxMan
class Level():
start_pos_y = 600
start_pos_x = 600
bricks = 24
# Loads and initiates the level
def __init__(self, game):
self.finished = False
self.sprites = pygame.sprite.Group()
self.surface = game.surface
self.speed = game.speed
self.surface_rect = self.surface.get_rect()
self.center = (self.surface_rect.width / 2, self.surface_rect.height / 2)
self.background = Background(self.speed)
self.heightfactor = 0
self.x = 0
self.y = 0
for i in range(0, self.bricks):
self.x = i * Brick.width
if i % 4 == 0:
self.x += Brick.width / 2
if self.heightfactor == 4:
self.heightfactor -= 2
else:
self.heightfactor += 1
self.y = self.start_pos_y - (50 * self.heightfactor)
brick = Brick(self.x, self.y, self.speed, self.bricks)
self.sprites.add(brick)
self.green_cash = Cash(self.surface_rect.width, self.y - 70, self.speed, 'green')
self.receipt = Cash(self.surface_rect.width, self.y - 50, self.speed * 1.5, 'receipt')
self.sprites.add(self.green_cash)
self.sprites.add(self.receipt)
self.player = Player(self.start_pos_x, 0, self.speed)
self.taxman = TaxMan(50, 100, self.speed)
self.sprites.add(self.taxman)
# Updates the sprite positions
def update(self, up):
self.background.update()
self.sprites.update(self.surface)
self.player.update(up)
self.player.collide(self.sprites, self.surface)
if self.player.rect.y > self.surface_rect.height:
self.player.state = 'busted'
# Draws the sprites
def draw(self):
self.background.draw(self.surface)
self.sprites.draw(self.surface)
self.player.draw(self.surface)
# If we hit some cash during the update phase, draw cash info to screen
if self.receipt.state == 'collect':
self.surface.blit(self.receipt.text, self.center)
if self.green_cash.state == 'collect':
self.surface.blit(self.green_cash.text, self.center)
def get_score(self):
return self.player.score.get_score()
|
[
"[email protected]"
] | |
ce75125d38790ce20168425c567cc0c99cb08509
|
38d23b1eb6c1728a73aac122ed4ecbd474d96410
|
/DjangoQuiz12/venv/Scripts/pasteurize-script.py
|
36596aa58ead1a309f02d472ce89ecd4a73f0372
|
[] |
no_license
|
apoorba/DjangoQuiz
|
a5c2ed766f41f9160bacd8c69ea1f8483832fe84
|
c92d713b8c7aa928bc8c817905b5d7f850dab212
|
refs/heads/master
| 2020-03-24T06:08:17.480818 | 2018-07-27T03:10:45 | 2018-07-27T03:10:45 | 142,517,480 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
#!C:\Users\AR\PycharmProjects\DjangoQuiz\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.16.0','console_scripts','pasteurize'
__requires__ = 'future==0.16.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.16.0', 'console_scripts', 'pasteurize')()
)
|
[
"[email protected]"
] | |
afd9ffeb1bc993d9503161429c26f6b38e550db9
|
3dbbde1aa96fc09e9aab885cf3713e86f3572dec
|
/gs-vtoi/bin/glacier
|
6e1a948d2c394d1ca44797d1d7fd32027f7bc0eb
|
[] |
no_license
|
bopopescu/gs-vtoi
|
6223d6dbf47e89292bd0e79e24e5664450e28cf6
|
f12b802976d0020179d1b40b0b5e3af5b72d55cc
|
refs/heads/master
| 2022-11-24T16:31:36.804869 | 2018-07-31T08:30:56 | 2018-07-31T08:30:56 | 282,551,982 | 0 | 0 | null | 2020-07-26T01:09:10 | 2020-07-26T01:09:09 | null |
UTF-8
|
Python
| false | false | 5,288 |
#!/Users/Sang/OneDrive/Developments/gs-vtoi/gs-vtoi/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Miguel Olivares http://moliware.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
glacier
~~~~~~~
Amazon Glacier tool built on top of boto. Look at the usage method to see
how to use it.
Author: Miguel Olivares <[email protected]>
"""
import sys
from boto.glacier import connect_to_region
from getopt import getopt, GetoptError
from os.path import isfile, basename
COMMANDS = ('vaults', 'jobs', 'upload')
def usage():
print("""
glacier <command> [args]
Commands
vaults - Operations with vaults
jobs - Operations with jobs
upload - Upload files to a vault. If the vault doesn't exits, it is
created
Common args:
--access_key - Your AWS Access Key ID. If not supplied, boto will
use the value of the environment variable
AWS_ACCESS_KEY_ID
--secret_key - Your AWS Secret Access Key. If not supplied, boto
will use the value of the environment variable
AWS_SECRET_ACCESS_KEY
--region - AWS region to use. Possible values: us-east-1, us-west-1,
us-west-2, ap-northeast-1, eu-west-1.
Default: us-east-1
Vaults operations:
List vaults:
glacier vaults
Jobs operations:
List jobs:
glacier jobs <vault name>
Uploading files:
glacier upload <vault name> <files>
Examples :
glacier upload pics *.jpg
glacier upload pics a.jpg b.jpg
""")
sys.exit()
def connect(region, debug_level=0, access_key=None, secret_key=None):
""" Connect to a specific region """
layer2 = connect_to_region(region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
debug=debug_level)
if layer2 is None:
print('Invalid region (%s)' % region)
sys.exit(1)
return layer2
def list_vaults(region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
for vault in layer2.list_vaults():
print(vault.arn)
def list_jobs(vault_name, region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
print(layer2.layer1.list_jobs(vault_name))
def upload_files(vault_name, filenames, region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
layer2.create_vault(vault_name)
glacier_vault = layer2.get_vault(vault_name)
for filename in filenames:
if isfile(filename):
sys.stdout.write('Uploading %s to %s...' % (filename, vault_name))
sys.stdout.flush()
archive_id = glacier_vault.upload_archive(
filename,
description = basename(filename))
print(' done. Vault returned ArchiveID %s' % archive_id)
def main():
if len(sys.argv) < 2:
usage()
command = sys.argv[1]
if command not in COMMANDS:
usage()
argv = sys.argv[2:]
options = 'a:s:r:'
long_options = ['access_key=', 'secret_key=', 'region=']
try:
opts, args = getopt(argv, options, long_options)
except GetoptError as e:
usage()
    # Parse arguments
access_key = secret_key = None
region = 'us-east-1'
for option, value in opts:
if option in ('-a', '--access_key'):
access_key = value
elif option in ('-s', '--secret_key'):
secret_key = value
elif option in ('-r', '--region'):
region = value
# handle each command
if command == 'vaults':
list_vaults(region, access_key, secret_key)
elif command == 'jobs':
if len(args) != 1:
usage()
list_jobs(args[0], region, access_key, secret_key)
elif command == 'upload':
if len(args) < 2:
usage()
upload_files(args[0], args[1:], region, access_key, secret_key)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | ||
894b89da175042a21e5348d0f8d8a09fff415934
|
7e38a33607e1496a8e25da2ea4de93aba0f7a6db
|
/tests/test_tx.py
|
148ede775632a48aacff47ec1ba3cf5ad4fee695
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
AustEcon/bitcoinX
|
c01f96626c72765a884f0f08f171aee5bb47dbe9
|
06796905626ffebf7b1eb92238b1771f103efee8
|
refs/heads/master
| 2022-12-03T06:00:09.898506 | 2022-11-21T07:16:42 | 2022-11-21T07:16:42 | 260,636,435 | 1 | 0 |
MIT
| 2020-05-02T07:29:44 | 2020-05-02T07:29:44 | null |
UTF-8
|
Python
| false | false | 19,997 |
py
|
from io import BytesIO
import random
import pytest
from bitcoinx import (
Script, PublicKey, SigHash, hash_to_hex_str, Bitcoin, BitcoinTestnet, JSONFlags,
)
from bitcoinx.tx import *
from bitcoinx.tx import LOCKTIME_THRESHOLD
from .utils import read_tx, read_tx_hex, read_signature_hashes, read_json_tx
def test_tx_read():
tx = read_tx('b59de025.txn')
assert tx.version == 2
assert len(tx.inputs) == 7
assert len(tx.outputs) == 3
assert tx.locktime == 0
def test_from_bytes():
tx_bytes = bytes.fromhex(read_tx_hex('b59de025.txn'))
tx = Tx.from_bytes(tx_bytes)
assert tx.to_bytes() == tx_bytes
def test_from_hex():
tx_hex = read_tx_hex('b59de025.txn')
tx = Tx.from_hex(tx_hex)
assert tx.to_hex() == tx_hex
def test_to_bytes_to_hex():
tx_hex = read_tx_hex('b59de025.txn')
tx = Tx.from_hex(tx_hex)
assert tx.to_bytes() == bytes.fromhex(tx_hex)
assert tx.to_hex() == tx_hex
def test_repr():
tx = read_tx('afda808f.txn')
assert repr(tx) == (
'Tx(version=1, inputs=[TxInput(prev_hash="00000000000000000000000000000000000000000000'
'00000000000000000000", prev_idx=4294967295, script_sig="0319c4082f626d67706f6f6c2e636f6d2'
'f5473537148110d9e7fcc3cf74ee70c0200", sequence=4294967295)], outputs=[TxOutput(value='
'1250005753, script_pubkey="76a914db1aea84aad494d9f5b253327da23c4e51266c9388ac")], '
'locktime=0)'
)
tx_testcases = ['503fd37f.txn']
@pytest.mark.parametrize("filename", tx_testcases)
def test_signature_hash(filename):
tx, values, pk_scripts = read_json_tx(filename)
correct_hashes = read_signature_hashes(filename.replace('.txn', '.sig_hashes'))
n = 0
for input_index, (value, pk_script, txin) in enumerate(zip(values, pk_scripts, tx.inputs)):
for sighash in range(256):
sighash = SigHash(sighash)
if sighash.has_forkid():
signature_hash = tx.signature_hash(input_index, value, pk_script, sighash)
assert signature_hash == correct_hashes[n]
n += 1
def test_signature_hash_bad():
tx, _, _ = read_json_tx('503fd37f.txn')
with pytest.raises(IndexError):
tx.signature_hash(-1, 5, b'', SigHash.ALL)
with pytest.raises(IndexError):
tx.signature_hash(2, 5, b'', SigHash.ALL)
with pytest.raises(ValueError):
tx.signature_hash(0, -1, b'', SigHash.ALL)
with pytest.raises(TypeError):
tx.signature_hash(0, 0, b'', 1)
tx.signature_hash(0, 0, b'', SigHash.NONE)
tx.signature_hash(1, 0, b'', SigHash(1))
@pytest.mark.parametrize("filename", tx_testcases)
def test_signatures(filename):
tx, values, pk_scripts = read_json_tx(filename)
for input_index, (value, pk_script, txin) in enumerate(zip(values, pk_scripts, tx.inputs)):
signature, pubkey = txin.script_sig.ops()
pubkey = PublicKey.from_bytes(pubkey)
signature_hash = tx.signature_hash(input_index, value, pk_script, SigHash(signature[-1]))
assert pubkey.verify_der_signature(signature[:-1], signature_hash, None)
class TestTx:
def test_is_coinbase(self):
tx = read_tx('afda808f.txn')
assert tx.is_coinbase()
def test_are_inputs_final(self):
tx = read_tx('b59de025.txn')
assert tx.are_inputs_final()
tx.inputs[4].sequence += 1
assert not tx.are_inputs_final()
@pytest.mark.parametrize("nin, nout", ((1, 1), (1, 253), (253, 65536), (65536, 1)))
def test_size(self, nin, nout):
tx_in = TxInput(bytes(32), 0xffffffff, b'', 0xffffffff)
tx_out = TxOutput(0, b'')
tx = Tx(2, [tx_in] * nin, [tx_out] * nout, 0)
assert tx.size() == len(tx.to_bytes())
@pytest.mark.parametrize("locktime,inputs_final,height,timestamp,answer", (
# Locktime 0 is always final
(0, False, 0, 0, True),
(0, False, 1, 1, True),
(0, True, 0, 0, True),
(0, True, 1, 1, True),
# Locktime 1 is final only from block height 2
(1, False, 0, 0, False),
(1, False, 1, 0, False),
(1, False, 2, 0, True),
# If all inputs a final a tx is always final
(1, True, 0, 0, True),
(1, True, 1, 0, True),
(1, True, 2, 0, True),
# If < LOCKTIME_THRESHOLD, it's height-based
(LOCKTIME_THRESHOLD - 1, False, LOCKTIME_THRESHOLD - 1, 0, False),
(LOCKTIME_THRESHOLD - 1, False, LOCKTIME_THRESHOLD, 0, True),
(LOCKTIME_THRESHOLD - 1, True, LOCKTIME_THRESHOLD - 1, 0, True),
(LOCKTIME_THRESHOLD - 1, True, LOCKTIME_THRESHOLD, 0, True),
# If >= LOCKTIME_THRESHOLD, it's time-based
(LOCKTIME_THRESHOLD, False, LOCKTIME_THRESHOLD + 1, 0, False),
(LOCKTIME_THRESHOLD, False, 0, LOCKTIME_THRESHOLD, False),
(LOCKTIME_THRESHOLD, False, 0, LOCKTIME_THRESHOLD + 1, True),
(LOCKTIME_THRESHOLD, True, LOCKTIME_THRESHOLD + 1, 0, True),
(LOCKTIME_THRESHOLD, True, 0, LOCKTIME_THRESHOLD, True),
(LOCKTIME_THRESHOLD, True, 0, LOCKTIME_THRESHOLD + 1, True),
))
def test_is_final_for_block(self, locktime, inputs_final, height, timestamp, answer):
tx = read_tx('b59de025.txn')
tx.locktime = locktime
if not inputs_final:
tx.inputs[0].sequence = 0xfffffffe
assert tx.is_final_for_block(height, timestamp) == answer
def test_hash(self):
tx = read_tx('b59de025.txn')
assert tx.hex_hash() == 'b59de0255081f8032c521a1e70d9355876309a0c69e034db31c2ed387e9da809'
def test_total_output(self):
tx = read_tx('b59de025.txn')
assert tx.total_output_value() == 59_999_999_818
@pytest.mark.parametrize("script,coin,json", (
# Genesis tx
(
'01000000010000000000000000000000000000000000000000000000000000000000000000FFFFFFFF4'
'D04FFFF001D0104455468652054696D65732030332F4A616E2F32303039204368616E63656C6C6F7220'
'6F6E206272696E6B206F66207365636F6E64206261696C6F757420666F722062616E6B73FFFFFFFF010'
'0F2052A01000000434104678AFDB0FE5548271967F1A67130B7105CD6A828E03909A67962E0EA1F61DE'
'B649F6BC3F4CEF38C4F35504E51EC112DE5C384DF7BA0B8D578A4C702B6BF11D5FAC00000000',
Bitcoin,
{
'version': 1,
'nInputs': 1,
'vin': [
{
'coinbase': '04ffff001d0104455468652054696d65732030332f4a616e2f323030392'
'04368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f'
'757420666f722062616e6b73',
'text': '\x04��\x00\x1d\x01\x04EThe Times 03/Jan/2009 Chancellor on '
'brink of second bailout for banks',
'sequence': 4294967295
}
],
'nOutputs': 1,
'vout': [
{
'value': 5000000000,
'script': {
'asm': '04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1'
'f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf'
'11d5f OP_CHECKSIG',
'hex': '4104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0e'
'a1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6'
'bf11d5fac'
}
}
],
'locktime': 0,
'hash': '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b'
},
),
(
'0100000001e1337a3e268d53b9b292dab07a3fbf47a51aa155273362c5a9e7e3dfe64f006e000000006'
'a47304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d7545778a79cd125f0b8022013b3'
'e5a87f3fa84333f222dc32c2c75e630efb205a3c58010aab92ab425453104121030b56f95f6d8d5f6b8'
'4d4c7d6909423bd4b9cf189e9dd287fdea495582a3a5474feffffff01bd731f2c000000001976a914f6'
'7000134f47d60523a36505830115fd52bc656e88ac2bc30800',
Bitcoin,
{
'version': 1,
'nInputs': 1,
'vin': [
{
'hash': 'e1337a3e268d53b9b292dab07a3fbf47a51aa155273362c5a9e7e3dfe64f006e',
'idx': 0,
'script':
{
'asm': '304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d7545778'
'a79cd125f0b8022013b3e5a87f3fa84333f222dc32c2c75e630efb205a3c58010aa'
'b92ab42545310[ALL|FORKID] 030b56f95f6d8d5f6b84d4c7d6909423bd4b9cf18'
'9e9dd287fdea495582a3a5474',
'hex': '47304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d75457'
'78a79cd125f0b8022013b3e5a87f3fa84333f222dc32c2c75e630efb205a3c58010'
'aab92ab425453104121030b56f95f6d8d5f6b84d4c7d6909423bd4b9cf189e9dd28'
'7fdea495582a3a5474'
},
'sequence': 4294967294
}
],
'nOutputs': 1,
'vout': [
{
'value': 740258749,
'script':
{
'asm': 'OP_DUP OP_HASH160 f67000134f47d60523a36505830115fd52bc656e '
'OP_EQUALVERIFY OP_CHECKSIG',
'hex': '76a914f67000134f47d60523a36505830115fd52bc656e88ac'
}
}
],
'locktime': 574251,
'hash': '85d895859f19d8f0125f3a93af854a7b48c04cab8830f800cd5e4daaeb02dc00'
},
),
(
'0100000001e1337a3e268d53b9b292dab07a3fbf47a51aa155273362c5a9e7e3dfe64f006e000000006'
'a47304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d7545778a79cd125f0b8022013b3'
'e5a87f3fa84333f222dc32c2c75e630efb205a3c58010aab92ab425453104121030b56f95f6d8d5f6b8'
'4d4c7d6909423bd4b9cf189e9dd287fdea495582a3a5474feffffff01bd731f2c000000001976a914f6'
'7000134f47d60523a36505830115fd52bc656e88ac2bc30860',
Bitcoin,
{
'version': 1,
'nInputs': 1,
'vin': [
{
'hash': 'e1337a3e268d53b9b292dab07a3fbf47a51aa155273362c5a9e7e3dfe64f006e',
'idx': 0,
'script':
{
'asm': '304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d7545778'
'a79cd125f0b8022013b3e5a87f3fa84333f222dc32c2c75e630efb205a3c58010aa'
'b92ab42545310[ALL|FORKID] 030b56f95f6d8d5f6b84d4c7d6909423bd4b9cf18'
'9e9dd287fdea495582a3a5474',
'hex': '47304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d75457'
'78a79cd125f0b8022013b3e5a87f3fa84333f222dc32c2c75e630efb205a3c58010'
'aab92ab425453104121030b56f95f6d8d5f6b84d4c7d6909423bd4b9cf189e9dd28'
'7fdea495582a3a5474'
},
'sequence': 4294967294
}
],
'nOutputs': 1,
'vout': [
{
'value': 740258749,
'script':
{
'asm': 'OP_DUP OP_HASH160 f67000134f47d60523a36505830115fd52bc656e '
'OP_EQUALVERIFY OP_CHECKSIG',
'hex': '76a914f67000134f47d60523a36505830115fd52bc656e88ac'
}
}
],
'locktime': 1611186987,
'hash': '9eaa6c0529a2d151eb4f0c7cfe99125c54b8908a0d3e8f66423f769bb553a816'
},
),
), ids=['genesis', 'locktime block', 'locktime time'])
def test_to_json(self, script, coin, json):
flags = 0
assert Tx.from_hex(script).to_json(flags, coin) == json
json['size'] = len(script) // 2
flags += JSONFlags.SIZE
assert Tx.from_hex(script).to_json(flags, coin) == json
if json['locktime'] == 0:
json['locktimeMeaning'] = 'valid in any block'
elif json['locktime'] < 500_000_000:
json['locktimeMeaning'] = (f'valid in blocks with height greater than '
f'{json["locktime"]:,d}')
else:
json['locktimeMeaning'] = (
'valid in blocks with MTP greater than 2021-01-20 23:56:27 UTC'
)
flags += JSONFlags.LOCKTIME_MEANING
assert Tx.from_hex(script).to_json(flags, coin) == json
class TestTxInput:
def test_is_coinbase(self):
txin = TxInput(bytes(32), 0xffffffff, b'', 0xffffffff)
assert txin.is_coinbase()
txin.prev_idx = 0
assert not txin.is_coinbase()
txin.prev_idx = 0xffffffff
assert txin.is_coinbase()
txin.prev_hash = bytes(31) + b'\1'
assert not txin.is_coinbase()
def test_is_final(self):
txin = TxInput(bytes(32), 0xffffffff, b'', 0xffffffff)
assert txin.is_final()
txin.sequence -= 1
assert not txin.is_final()
def test_to_hex(self):
tx = read_tx('afda808f.txn')
assert tx.inputs[0].to_hex() == (
'0000000000000000000000000000000000000000000000000000000000000000ffffffff220319'
'c4082f626d67706f6f6c2e636f6d2f5473537148110d9e7fcc3cf74ee70c0200ffffffff'
)
@pytest.mark.parametrize("script_len", (0, 253, 65000, 120000))
def test_size(self, script_len):
txin = TxInput(bytes(32), 0xffffffff, b'', 0xffffffff)
txin.script_sig = bytes(script_len)
assert txin.size() == len(txin.to_bytes())
@pytest.mark.parametrize("script,json", (
# Genesis coinbase
(
'0000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff'
'001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e'
'206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff',
{
'coinbase': '04ffff001d0104455468652054696d65732030332f4a616e2f323030'
'39204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261'
'696c6f757420666f722062616e6b73',
'text': '\x04��\x00\x1d\x01\x04EThe Times 03/Jan/2009 Chancellor on brink '
'of second bailout for banks',
'sequence': 4294967295,
},
),
# Another coinbase
(
'0000000000000000000000000000000000000000000000000000000000000000ffffffff41032b2'
'c0a2f7461616c2e636f6d2f506c656173652070617920302e3520736174732f627974652c20696e'
'666f407461616c2e636f6d6419c0bead6d55ff46be0400ffffffff',
{
'coinbase': '032b2c0a2f7461616c2e636f6d2f506c656173652070617920302e352073617'
'4732f627974652c20696e666f407461616c2e636f6d6419c0bead6d55ff46be0400',
'text': '\x03+,\n/taal.com/Please pay 0.5 sats/byte, [email protected]\x19���mU'
'�F�\x04\x00',
'sequence': 4294967295,
}
),
# A P2PK signature
(
'c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd37040000000048473044022'
'04e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07'
'de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff',
{
'hash': 'c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704',
'idx': 0,
'script': {
'asm': '304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8'
'cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d09[ALL]',
'hex': '47304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5f'
'b8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901'
},
'sequence': 4294967295,
},
),
), ids=['genesis', "coinbase", "p2pk"])
def test_to_json(self, script, json):
assert TxInput.from_hex(script).to_json(0, 0) == json
assert TxInput.from_hex(script).to_json(JSONFlags.CLASSIFY_OUTPUT_SCRIPT, 0) == json
assert TxInput.from_hex(script).to_json(JSONFlags.ENUMERATE_INPUTS, None) == json
n = random.randrange(0, 100)
json.update({'nInput': n})
assert TxInput.from_hex(script).to_json(JSONFlags.ENUMERATE_INPUTS, n) == json
class TestTxOutput:
def test_to_hex(self):
tx = read_tx('afda808f.txn')
assert tx.outputs[0].to_hex() == (
'f992814a000000001976a914db1aea84aad494d9f5b253327da23c4e51266c9388ac'
)
@pytest.mark.parametrize("script_len", (0, 253, 65000, 120000))
def test_size(self, script_len):
output = TxOutput(0, b'')
output.script_pubkey = bytes(script_len)
assert output.size() == len(output.to_bytes())
@pytest.mark.parametrize("script,json,coin,extra", (
# Genesis P2PK output
(
'00f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1'
'f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac',
{
'value': 5000000000,
'script': {
'asm': '04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb64'
'9f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f OP_CHECKSIG',
'hex': '4104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb6'
'49f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac'
},
},
Bitcoin,
{
'type': 'pubkey',
'pubkey': '04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb6'
'49f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f',
'address': '1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa',
},
),
# P2PKH output
(
'7dd8db00000000001976a9141207c3cd11e35de894c432e9907f2dcb1446855888ac',
{
'value': 14407805,
'script': {
'asm': 'OP_DUP OP_HASH160 1207c3cd11e35de894c432e9907f2dcb14468558 '
'OP_EQUALVERIFY OP_CHECKSIG',
'hex': '76a9141207c3cd11e35de894c432e9907f2dcb1446855888ac',
},
},
BitcoinTestnet,
{
'type': 'pubkeyhash',
'address': 'mhAHm1zzjzuu61HhiQUyfjqqnewLQ3FM4s',
},
),
), ids=['p2pk', 'p2pkh'])
def test_to_json(self, script, coin, json, extra):
assert TxOutput.from_hex(script).to_json(0, coin) == json
assert TxOutput.from_hex(script).to_json(JSONFlags.ENUMERATE_OUTPUTS, coin) == json
n = random.randrange(0, 100)
json.update({'nOutput': n})
assert TxOutput.from_hex(script).to_json(JSONFlags.ENUMERATE_OUTPUTS, coin, n) == json
json['script'].update(extra)
assert TxOutput.from_hex(script).to_json(JSONFlags.CLASSIFY_OUTPUT_SCRIPT |
JSONFlags.ENUMERATE_OUTPUTS, coin, n) == json
|
[
"[email protected]"
] | |
2776efebcbbbc14a62ecb7939a01b767d64382b6
|
a6a6dfe651c394c0a316da0bd05b09c261024117
|
/visual_cluster.py
|
a76f26482e38129eee697d2054d187d6f4c7d6af
|
[] |
no_license
|
YuyaMurata/KMA
|
b963c6d9640151fd43d791bdfeff917baee22ab0
|
b7cca37c1dfdf60e48c174a5083c9a5cc1489703
|
refs/heads/master
| 2020-05-30T05:31:10.837398 | 2019-12-20T08:44:56 | 2019-12-20T08:44:56 | 189,562,482 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 894 |
py
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from mpl_toolkits.mplot3d.axes3d import Axes3D
import pyclustering
from pyclustering.cluster import xmeans
import pylab
filename = 'file/PC200_mainte_eval_center.csv'
data = pd.read_csv(filename, index_col='SID', delimiter=',', encoding='SJIS')
g = 'SCORE'
print(data)
n = int(max(data[g]))
clusters = data.iloc[:,11].values
axdata = data.iloc[:,[1,10]]
print([clusters.tolist()])
#pyclustering.utils.draw_clusters(axdata.values, [clusters.tolist()])
xs = 'AGE'
ys = 'AVG'
cl =data[g].tolist()
for i in range(0,n+1):
c = data[data[g] == i]
name = 'C'+str(i)
    plt.scatter(c[xs], c[ys], color=cm.tab10(i % 10), s=5, label=name)
print('cluster:'+str(len(set(data[g].values))))
#plt.legend(loc=4)
#plt.xlim(0, 10000)
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
|
[
"[email protected]"
] | |
1f65100839d9ff8b15648173db4bdc566eb7e7b4
|
439e3b0fcc8959483bc35ff9c1229ce240037bbe
|
/tests/test_kanwa.py
|
db033372d8214dbf5ebd4d6f1563242af952d467
|
[
"MIT"
] |
permissive
|
403JFW/kakasi-utils
|
16fe27265f1b7f05045e4370cf19de080c649e8f
|
698b6fc8d812637473dc941b36d9ccff87410d0c
|
refs/heads/master
| 2021-01-02T09:54:13.425825 | 2014-04-15T00:55:51 | 2014-04-15T00:55:51 | 17,693,614 | 3 | 0 | null | 2014-05-12T06:39:36 | 2014-03-13T03:37:09 |
Python
|
UTF-8
|
Python
| false | false | 1,754 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import unittest
from kakasi_utils.kanwa import Kanwa
class TestKanwa(unittest.TestCase):
def test_merge(self):
"""Test merge"""
# Get dict file paths
data_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
in_files = [
data_dir + "test_kanwa_input_a.txt",
data_dir + "test_kanwa_input_b.txt"
]
out_file = data_dir + "test_kanwa_output.txt"
# Run merge
kanwa = Kanwa()
kanwa.merge(in_files, out_file)
# Assert result
for in_file in in_files:
self._assert_dict_in_dict(in_file, out_file)
# Check duplication
self._load_dict(out_file, check_duplication=True)
os.remove(out_file)
def _assert_dict_in_dict(self, file_child, file_parent):
"""Assert that child dict files item in parent dict file"""
dict_child = self._load_dict(file_child)
dict_parent = self._load_dict(file_parent)
for item in dict_child.keys():
if item not in dict_parent:
raise AssertionError("'%s' not exists in %s" % (
item, dict_parent))
def _load_dict(self, in_dict_file, check_duplication=False):
"""Load KAKASI dict file and return python dict"""
table = {}
with codecs.open(in_dict_file, 'rU', 'euc_jp') as in_file:
for line in in_file:
line = line.rstrip()
if line[0:2] == ';;':
continue
if check_duplication and (line in table):
raise AssertionError("'%s' duplicates" % line)
table[line] = True
return table
|
[
"[email protected]"
] | |
c6ce9e4a4ce2934670386105b410efd371bb56c3
|
87140007e96872d3611f0778eb0eebe5799616d7
|
/runs/1000KB/src2-tgt1/seq-nobro-iter08000.cfg.py
|
d889d92141bd83f110aee7e52fd487b910171abe
|
[
"MIT"
] |
permissive
|
janpawellek/broeval
|
49499fa302abff916ffced201034d3b9394503cd
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
refs/heads/master
| 2021-01-11T12:19:13.619220 | 2016-12-20T16:23:27 | 2016-12-20T16:23:27 | 76,468,134 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 663 |
py
|
# Write results to this file
OUTFILE = 'runs/1000KB/src2-tgt1/seq-nobro-iter08000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1', '10.0.0.3']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False, False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 8000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 6
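# Example: with SIZE = 6 the requested file is 10^6 bytes (~1000 KB), matching
# the runs/1000KB/ directory used in OUTFILE above.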
|
[
"[email protected]"
] | |
086d727ec9d02496b8aed3098471ba7117856217
|
8b7a9ba0c2f8259ee7cfdfa83d8b702cb494c95b
|
/MyQuickDQR.py
|
a5e9a3a657d8985a49cee91a27141c9f92aff41f
|
[] |
no_license
|
danyiwang/Fraud-Analytics-Coursework
|
cd216b7e6c57470b513bbb29eb92248d851a17b7
|
04396a7b7c13476a3493a4f3aeed36273cdf1d78
|
refs/heads/master
| 2020-04-25T11:56:23.792373 | 2019-04-11T09:31:55 | 2019-04-11T09:31:55 | 172,761,849 | 0 | 0 | null | 2019-02-26T18:14:05 | 2019-02-26T17:55:47 |
Python
|
UTF-8
|
Python
| false | false | 13,117 |
py
|
## Note: the format and style can be customized.
## Process
## Step 1: read data
## import pandas as pd
## import MyQuickDQR as dqr
## mydata = pd.read_csv()
## Step 2: define category columns
## mydata[category_columns] = mydata[category_columns].astype('category')
## keep in mind that some columns may need to change from numeric to category: ZIP, etc.
## Step 3: generate Data Quality Report
## dqr.QuickDQR(mydata, 'xxx.docx')
## Step 4: If the output reads "Fail to add graph for (variable name)", you need to manually make a plot. Sorry for the inconvenience.
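## A concrete sketch of the steps above (illustrative only -- the input file name
## and the choice of category columns are assumptions, not part of this module):
## import pandas as pd
## import MyQuickDQR as dqr
## mydata = pd.read_csv('claims.csv')
## category_columns = ['ZIP', 'STATE']  # e.g. codes that read as numbers but are categorical
## mydata[category_columns] = mydata[category_columns].astype('category')
## dqr.QuickDQR(mydata, 'claims_DQR.docx')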
import pandas as pd
import numpy as np
import scipy.stats as sps
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as skl
from docx import Document
from docx.shared import Inches
# Function to map out the summary table for Categorical Variable.
def MapCategory(cat):
# Initiate Empty Summary Table for Categorical Variable
cat_col = list(cat.columns)
t ={'colname': cat_col,
'n_record': cat_col,
'percent' : cat_col,
'unique_v': cat_col,
'mode': cat_col,
'count_mode': cat_col}
cat_table = pd.DataFrame(t)
for l in range(len(cat_table)):
cat_table.iloc[l,1] = cat.iloc[:,l].count()
cat_table.iloc[l,2] = round(cat_table.iloc[l,1] / len(cat) * 100,2)
cat_table.iloc[l,3] = len(cat.iloc[:,l].unique())
m = cat.iloc[:,l].value_counts()
cat_table.iloc[l,4] = m.index[0]
cat_table.iloc[l,5] = m.iloc[0]
return cat_table
def DesCategory(cat_table):
# Description for Categorical Variable
cat_description = []
for i in range(len(cat_table)):
name = str(cat_table['colname'][i])
n = str(int(cat_table['n_record'][i]))
p = str(round(cat_table['percent'][i],2))
unique_v = str(cat_table['unique_v'][i])
mode = str(cat_table['mode'][i])
count = str(cat_table['count_mode'][i])
cat_description.append(name+' is a categorical variable. '+name+\
' has '+n+' lines of records, and is '+p+\
'% populated. '+name+' has '+unique_v +\
' unique categories. The most common category is '\
+mode+ ', which occured '+count+' times out of '\
+n+' records. ')
return cat_description
def GraphCategory(cat):
sns.set_style("whitegrid")
# Create Category Graph
cat_col = list(cat.columns)
    comment = []
    for c in cat_col:
        m = cat[c].value_counts()
        name = c + '.png'
        level = len(m)
try:
if level >= 20:
comment.append(c)
if m.iloc[0] / m.iloc[2] >= 8: # If the scale has too big difference
plot = cat[c].value_counts().head(20).plot(kind='bar')
plot.set_yscale('log')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
else:
plot = cat[c].value_counts().head(20).plot(kind='bar')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
else:
if m.iloc[0] / m.iloc[2] >= 8: # If the scale has too big difference
plot = cat[c].value_counts().plot(kind='bar')
plot.set_yscale('log')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
else:
plot = cat[c].value_counts().plot(kind='bar')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
except:
print('Fail to create graph for', c, '. Try manually.')
# Description for Categorical Variable: comment on graphs
cat_description = []
for c in cat_col:
if c in comment:
cat_description.append('Below is a graph showing the destribution of '+c+': ')
else:
cat_description.append('Below is a graph showing the destribution of '+c+': (Showing top 20 categories)')
return cat_description
# Initiate Summary Table for Numerical Variable
def MapNumeric(num):
num_col = list(num.columns)
t ={'colname': num_col,
'n_record': num_col,
'percent' : num_col,
'unique_v': num_col,
'n_zero': num_col,
'mode': num_col,
'count_mode': num_col,
'min': num_col,
'max': num_col,
'mean': num_col,
'std': num_col,
}
num_table = pd.DataFrame(t)
# Fill in the Numerical Variable Summary Table
for l in range(len(num_table)):
num_table.iloc[l,1] = num.iloc[:,l].count()
num_table.iloc[l,2] = round(num_table.iloc[l,1] / len(num) * 100,2)
num_table.iloc[l,3] = len(num.iloc[:,l].unique())
num_table.iloc[l,4] = sum(num.iloc[:,l] == 0)
m = num.iloc[:,l].value_counts()
num_table.iloc[l,5] = m.index[0]
num_table.iloc[l,6] = m.iloc[0]
num_table.iloc[l,7] = num.iloc[:,l].min()
num_table.iloc[l,8] = round(num.iloc[:,l].max(), 2)
num_table.iloc[l,9] = round(num.iloc[:,l].mean(), 2)
num_table.iloc[l,10] = round(num.iloc[:,l].std(), 2)
return num_table
def DesNumeric(num_table):
# Description for Numerical Variable
num_description1 = []
for i in range(len(num_table)):
name = str(num_table['colname'][i])
n = str(int(num_table['n_record'][i]))
p = str(round(num_table['percent'][i],2))
unique_v = str(num_table['unique_v'][i])
n_zero = str(num_table['n_zero'][i])
mode = str(num_table['mode'][i])
count = str(num_table['count_mode'][i])
min_ = str(int(num_table['min'][i]))
max_ = str(int(num_table['max'][i]))
avg = str(round(num_table['mean'][i],2))
std = str(round(num_table['std'][i],2))
num_description1.append(name+' is a numeric variable. '+name+' has '\
+n+' lines of records, and is '+p+'% populated. '\
+name+' has '+unique_v +' unique categories. '\
+'The most common value is '+mode+ ', occured '\
+count+' times. '+name+' has '+n_zero+\
' zero values out of '+n+' lines of records. '\
+'The summary statistics and distribution is as follows: '\
+'(excluding null value)')
return num_description1
def GraphNumeric(num):
# Create Graph for Numerical Variable
num_col = list(num.columns)
for c in num_col:
null_remove = num[pd.isnull(num[c]) == False]
m = null_remove[c].value_counts()
mode_count = m.iloc[0]
next_mode_count = m.iloc[4]
name = c+'.png'
try:
if (mode_count / next_mode_count) >= 5:
sns.distplot(null_remove[c],bins = 80, kde=False, rug = False).set_yscale('log')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
else:
sns.distplot(null_remove[c],bins = 80, kde=False, rug = False)
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
except:
print('Fail to create graph for',c,'. Try manually.')
def QuickDQR(mydata, filename):
# Divide fields in to Category Variable and Numerical Variable
cat = mydata.loc[:, mydata.dtypes == 'category']
num = mydata.loc[:, mydata.dtypes != 'category']
#Produce Cat Results
cat_table = MapCategory(cat)
cat_description1 = DesCategory(cat_table)
cat_description2 = GraphCategory(cat)
#Produce Num Results
num_table = MapNumeric(num)
num_description1 = DesNumeric(num_table)
GraphNumeric(num)
# Document Output!!!
document = Document()
document.add_heading('Data Quality Report', 0)
# High-level summary
document.add_heading('High-Level Description of the Data',level = 1)
document.add_paragraph('This dataset shows the information about (dataset name). '\
+'It covers the period from (1/1/2010) to (12/31/2010). '\
+'The dataset has '+str(mydata.shape[1])+\
' fields and '+str(mydata.shape[0])+' records.', style = 'Body Text')
# Summary table of all fields
document.add_heading('Summary Table of All Fields',level = 1)
document.add_paragraph('After understanding each field, I re-categorized '\
+'those fields into numerical and categorical fields. '\
+str(len(num_table))+' field is recognized as numerical field '\
+'and the rest of the '+str(len(cat_table))+' fields are categorical fields. '\
+'The following are two summary tables for categorical fields '\
+'and numerical fields followed by each individual field’s '\
+'detailed description respectively.',style = 'Body Text')
# Categorical Variable:
document.add_heading('Categorical Variable Summary: ', level = 2)
# Initiate Summary Table Header
table = document.add_table(rows = 1, cols = 6, style = 'Light Grid Accent 1')
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Field'
hdr_cells[1].text = 'Number of Records'
hdr_cells[2].text = 'Populated %'
hdr_cells[3].text = 'Unique Value'
hdr_cells[4].text = 'Most Common Category'
    hdr_cells[5].text = 'Occurrence of Common Category'
# Fill in Summary Table Cell
cat_col = list(cat.columns)
for i in range(len(cat_col)):
row_cells = table.add_row().cells
for j in range(6):
row_cells[j].text = str(cat_table.iloc[i,j])
# Individual Field:
document.add_heading('Individual Fields: ', level = 3)
for i in range(len(cat_description1)):
name = cat_col[i]
document.add_paragraph(name, style = 'List Number')
document.add_paragraph(cat_description1[i], style = 'Body Text')
table = document.add_table(rows = 1, cols = 6, style = 'Light Grid Accent 1')
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Field'
hdr_cells[1].text = 'Number of Records'
hdr_cells[2].text = 'Populated %'
hdr_cells[3].text = 'Unique Value'
hdr_cells[4].text = 'Most Common Category'
        hdr_cells[5].text = 'Occurrence of Common Category'
row_cells = table.add_row().cells
for j in range(6):
row_cells[j].text = str(cat_table.iloc[i,j])
document.add_paragraph(cat_description2[i], style = 'Body Text')
try:
document.add_picture(name+'.png')
except:
print('Fail to add graph for',name,'. Try manually. ')
# Numeric Variable:
document.add_heading('Numeric Variable Summary: ', level = 2)
# Initiate Summary Table Header
table = document.add_table(rows = 1, cols = 11, style = 'Light Grid Accent 1')
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Field'
hdr_cells[1].text = 'Number of Records'
hdr_cells[2].text = 'Populated %'
hdr_cells[3].text = 'Unique Value'
hdr_cells[4].text = 'Number of Zero'
hdr_cells[5].text = 'Most Common Value'
    hdr_cells[6].text = 'Occurrence of Common Value'
hdr_cells[7].text = 'Min'
hdr_cells[8].text = 'Max'
hdr_cells[9].text = 'Average'
hdr_cells[10].text = 'Standard Deviation'
# Fill in Summary Table Cell
num_col = list(num.columns)
for i in range(len(num_col)):
row_cells = table.add_row().cells
for j in range(11):
row_cells[j].text = str(num_table.iloc[i,j])
# Individual Field:
document.add_heading('Individual Fields: ', level = 3)
for i in range(len(num_description1)):
name = num_col[i]
document.add_paragraph(name, style = 'List Number')
document.add_paragraph(num_description1[i], style = 'Body Text')
table = document.add_table(rows = 1, cols = 11, style = 'Light Grid Accent 1')
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Field'
hdr_cells[1].text = 'Number of Records'
hdr_cells[2].text = 'Populated %'
hdr_cells[3].text = 'Unique Value'
hdr_cells[4].text = 'Number of Zero'
hdr_cells[5].text = 'Most Common Value'
        hdr_cells[6].text = 'Occurrence of Common Value'
hdr_cells[7].text = 'Min'
hdr_cells[8].text = 'Max'
hdr_cells[9].text = 'Average'
hdr_cells[10].text = 'Standard Deviation'
row_cells = table.add_row().cells
for j in range(11):
row_cells[j].text = str(num_table.iloc[i,j])
try:
document.add_picture(name+'.png')
except:
print('Fail to add graph for',name,'. Try manually. ')
document.save(filename)
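# Minimal usage sketch (added; not part of the original module, and the file and
# column names below are hypothetical). QuickDQR expects categorical columns to
# already carry the pandas 'category' dtype, because it splits fields on
# `mydata.dtypes == 'category'`:
#
#   import pandas as pd
#   df = pd.read_csv('claims_2010.csv')
#   for col in ['state', 'product_type']:
#       df[col] = df[col].astype('category')
#   QuickDQR(df, 'data_quality_report.docx')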
|
[
"[email protected]"
] | |
576e64402ae40dbdfa0ff48cf98e3ac2a5e8f284
|
b5e2b49ff6cf665e25030c0e472bb3fccb4d8318
|
/comment/models.py
|
e2e51082354dac9ea270d39736ac802052248e2b
|
[
"Apache-2.0"
] |
permissive
|
sevenaper/simple_blog
|
f03b802bfe88239e176cd5ad613e7db5d1dc9fd8
|
2ebe177b02477ae1d108155545f8cbec9070fdb3
|
refs/heads/master
| 2021-01-24T12:37:55.352824 | 2018-12-18T06:58:15 | 2018-12-18T06:58:15 | 123,146,661 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 650 |
py
|
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
# Create your models here.
class Comment(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
text = models.TextField()
comment_time = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Meta:
ordering = ['-comment_time']
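# Usage sketch (added; hypothetical, assuming a Blog model defined elsewhere in
# the project). The GenericForeignKey lets this one Comment table attach to any model:
#
#   from blog.models import Blog
#   blog = Blog.objects.first()
#   Comment.objects.create(content_object=blog, text='Nice post', user=request.user)
#
#   # fetch all comments attached to that blog post
#   ct = ContentType.objects.get_for_model(Blog)
#   comments = Comment.objects.filter(content_type=ct, object_id=blog.pk)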
|
[
"[email protected]"
] | |
61d2d4e19055b2c5e330f42f506a5a25e1550bae
|
c0b3e2e92efe8e2a2497a0850b136f42e59d9303
|
/预分词.py
|
86a1088574c1a4fd86b5ef5f69200a42588ea4a9
|
[] |
no_license
|
Asuraqi/AushorShipAnalysis
|
126236b053a47947f344c05ab73a174650973c8e
|
1cc2a96f9e02e31b9f671ab88151fc0138db9c8e
|
refs/heads/master
| 2022-11-17T13:12:05.412748 | 2020-07-11T15:30:09 | 2020-07-11T15:30:09 | 278,858,763 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 932 |
py
|
import os
import re
import numpy as np
import jieba
import jieba.posseg
TRAIN_DIR = r"E:\TrainData"
OUT_DIR = r"D:\output_train"
for train_file in os.listdir(TRAIN_DIR):
file_path = os.path.join(TRAIN_DIR, train_file)
out_path = os.path.join(OUT_DIR, train_file + ".seg")
if os.path.exists(out_path):
        print('already segmented, skip:', out_path)
continue
print(file_path)
with open(out_path, 'w', encoding='utf-8') as out:
with open(file_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
author_id, novel_id, chapter_id, content = line.strip('\n').split('\t')
word_list = []
for word, flag in jieba.posseg.cut(content):
if flag.startswith("x"):
continue
word_list.append(word + "/" + flag)
out.write(' '.join(word_list) + '\n')
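# Illustration (added; not part of the original script): each output line is the
# space-joined "word/POS" sequence for one chapter. For a sample sentence such as
# "我爱北京天安门", jieba.posseg.cut would yield something like:
#   我/r 爱/v 北京/ns 天安门/ns
# (tokens whose flag starts with "x", i.e. punctuation and other non-words, are dropped above).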
|
[
"[email protected]"
] | |
82fd235db118646fc86003f4b9b8c9456cea7a02
|
c5758c1f4c880f4530df1a5ffb4c30ee2da445ee
|
/pytracking/vot_ep/sk3x3_meanmax_adaptive/vot_wrapper_sk3x3_meanmax_adaptive_ep0024.py
|
23ccb4ec2fc0650043d494e0655d884737572b61
|
[] |
no_license
|
bfjei2825401/d3s
|
6d662fc301181a0e3ad831b0db6111e3cf8f4097
|
32140a3c67252f0e98cbfbf6ad6d2a79267c221b
|
refs/heads/master
| 2023-02-27T09:57:25.692878 | 2021-01-27T14:20:57 | 2021-01-27T14:20:57 | 297,217,521 | 0 | 0 | null | 2020-09-21T03:23:09 | 2020-09-21T03:23:09 | null |
UTF-8
|
Python
| false | false | 2,523 |
py
|
import pytracking.vot as vot
import sys
import cv2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pytracking.tracker.segm_sk3x3_meanmax_adaptive import SegmSK3x3MeanMaxAdaptive
from pytracking.parameter.segm_sk3x3_meanmax_adaptive import default_params_ep as vot_params
def rect_to_poly(rect):
x0 = rect[0]
y0 = rect[1]
x1 = rect[0] + rect[2]
y1 = rect[1]
x2 = rect[0] + rect[2]
y2 = rect[1] + rect[3]
x3 = rect[0]
y3 = rect[1] + rect[3]
return [x0, y0, x1, y1, x2, y2, x3, y3]
def parse_sequence_name(image_path):
idx = image_path.find('/color/')
return image_path[idx - image_path[:idx][::-1].find('/'):idx], idx
def parse_frame_name(image_path, idx):
frame_name = image_path[idx + len('/color/'):]
return frame_name[:frame_name.find('.')]
# MAIN
handle = vot.VOT("polygon")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
sys.exit(0)
params = vot_params.parameters(24)
gt_rect = [round(selection.points[0].x, 2), round(selection.points[0].y, 2),
round(selection.points[1].x, 2), round(selection.points[1].y, 2),
round(selection.points[2].x, 2), round(selection.points[2].y, 2),
round(selection.points[3].x, 2), round(selection.points[3].y, 2)]
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
sequence_name, idx_ = parse_sequence_name(imagefile)
frame_name = parse_frame_name(imagefile, idx_)
params.masks_save_path = ''
params.save_mask = False
tracker = SegmSK3x3MeanMaxAdaptive(params)
# tell the sequence name to the tracker (to save segmentation masks to the disk)
tracker.sequence_name = sequence_name
tracker.frame_name = frame_name
tracker.initialize(image, gt_rect)
while True:
imagefile = handle.frame()
if not imagefile:
break
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
# tell the frame name to the tracker (to save segmentation masks to the disk)
frame_name = parse_frame_name(imagefile, idx_)
tracker.frame_name = frame_name
prediction = tracker.track(image)
if len(prediction) == 4:
prediction = rect_to_poly(prediction)
pred_poly = vot.Polygon([vot.Point(prediction[0], prediction[1]),
vot.Point(prediction[2], prediction[3]),
vot.Point(prediction[4], prediction[5]),
vot.Point(prediction[6], prediction[7])])
handle.report(pred_poly)
|
[
"[email protected]"
] | |
628e76b331e9bbba8db71887247e3e6d96e01d2d
|
a337eb50ad01d93a8fe27103169fd8a0675af4f7
|
/showEachHeight.py
|
0dcc5772c849939f6a1fd1e3dfd6f218f4f6fe0e
|
[] |
no_license
|
pangshumao/CARN
|
c18368c9f08bcf99bc205b53c5a4780ce744dc7e
|
dbdeae1abb4ea486b86dfa7806c9e3a4a3c9837e
|
refs/heads/master
| 2020-05-15T00:20:31.732168 | 2019-05-29T14:25:47 | 2019-05-29T14:25:47 | 182,010,317 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,806 |
py
|
import numpy as np
import scipy.io as sio
import os
from utils.preprocessing import scale2actual
from scipy.stats import ttest_rel as ttest
# rootDir = 'F:\\ImageData\\spine\\aligned(results from graham-previous landmarks)\\DenseNet-Gan\\disc_vb_height\\results'
# foldDir = 'F:\\ImageData\\spine\\aligned(results from graham-previous landmarks)\\DenseNet-Gan'
# depth = '8'
# method = 'GCNN'
# method = 'CNN'
# subDir = method + '_growth_rate_48_depth_' + depth + '_dataset_SPINE_gamma_0.5_lambda_g_0.0_lr_0.06'
# subDir = method + '_growth_rate_48_depth_' + depth + '_dataset_SPINE_gamma_0.5_lambda_g_0.0_lr_0.06_knn_20_laeweight_7.0'
rootDir = 'H:\\ImageData\\spine\\allData\\DenseNet-Gan-allData\\disc_vb_height\\results'
foldDir = 'H:\\ImageData\\spine\\allData\\DenseNet-Gan-allData'
depth = '8'
subDir = 'GCNN-SDNE_depth_8_dataset_SPINE+_gamma_0.5_lambda_g_0.0_lr_0.04_knn_20_laeweight_101.0_sdneweight_0.005'
# rootDir = 'H:\\ImageData\\spine\\allData\\DenseNet-Gan-allData\\disc_vb_height\\results'
# foldDir = 'H:\\ImageData\\spine\\allData\\DenseNet-Gan-allData'
# subDir = 'DenseNet_depth_26_dataset_SPINE+_gamma_0.5_lambda_g_0.0_lr_0.04_knn_20_laeweight_0.0_sdneweight_0.0'
# rootDir = 'H:\\ImageData\\spine\\allData\\DenseNet-Gan-allData-auNum\\disc_vb_height\\results'
# foldDir = 'H:\\ImageData\\spine\\allData\\DenseNet-Gan-allData-auNum'
# subDir = 'GCNN_depth_7_dataset_SPINE+_gamma_0.5_lambda_g_0.0_lr_0.04_knn_20_laeweight_101.0_sdneweight_0.005'
total_act_train_pre_y = np.zeros([860, 30])
total_act_train_y = np.zeros([860, 30])
total_act_val_pre_y = np.zeros([215, 30])
total_act_val_y = np.zeros([215, 30])
for i in range(1,6):
data = np.load(os.sep.join([rootDir, subDir, 'predict-fold' + str(i) + '.npz']))
pixelSizes = data['pixelSizes']
height = data['height']
width = data['width']
train_pre_y = data['train_pre_y']
train_y = data['train_y']
val_pre_y = data['val_pre_y']
val_y = data['val_y']
foldData = sio.loadmat(os.sep.join([foldDir, 'fold' + str(i) + '-ind.mat']))
trainInd = foldData['trainInd'].flatten()
valInd = foldData['valInd'].flatten()
act_train_pre_y = scale2actual(train_pre_y, pixelSizes[trainInd, :], np.tile(height, train_pre_y.shape),
np.tile(width, train_pre_y.shape), mode='height')
act_train_y = scale2actual(train_y, pixelSizes[trainInd, :], np.tile(height, train_y.shape),
np.tile(width, train_y.shape), mode='height')
act_val_pre_y = scale2actual(val_pre_y, pixelSizes[valInd, :], np.tile(height, val_pre_y.shape),
np.tile(width, val_pre_y.shape), mode='height')
act_val_y = scale2actual(val_y, pixelSizes[valInd, :], np.tile(height, val_y.shape),
np.tile(width, val_y.shape), mode='height')
total_act_train_pre_y[(i - 1) * 172: i * 172, :] = act_train_pre_y
total_act_train_y[(i - 1) * 172: i * 172, :] = act_train_y
total_act_val_pre_y[(i-1) * 43 : i * 43, :] = act_val_pre_y
total_act_val_y[(i - 1) * 43: i * 43, :] = act_val_y
pass
total_act_train_err = np.abs(total_act_train_pre_y - total_act_train_y)
total_act_val_err = np.abs(total_act_val_pre_y - total_act_val_y)
temp = np.mean(total_act_train_err, axis=1)
print(temp[172*3+76])
np.save('/'.join([rootDir, subDir, 'total_act_val_err.npy']), total_act_val_err)
train_mean_mae = np.mean(total_act_train_err, axis=0)
val_mean_mae = np.mean(total_act_val_err, axis=0)
print('train mean_mae = ', train_mean_mae)
print('val mean_mae = ', val_mean_mae)
sio.savemat(os.sep.join([rootDir, subDir, 'total_act_val_err.mat']), {'total_act_val_err':total_act_val_err})
train_disc_mae = np.mean(total_act_train_err[:, :15])
train_disc_std = np.std(total_act_train_err[:, :15])
train_vb_mae = np.mean(total_act_train_err[:, 15:])
train_vb_std = np.std(total_act_train_err[:, 15:])
train_total_mae = np.mean(total_act_train_err)
train_total_std = np.std(total_act_train_err)
val_disc_mae = np.mean(total_act_val_err[:, :15])
val_disc_std = np.std(total_act_val_err[:, :15])
val_vb_mae = np.mean(total_act_val_err[:, 15:])
val_vb_std = np.std(total_act_val_err[:, 15:])
val_total_mae = np.mean(total_act_val_err)
val_total_std = np.std(total_act_val_err)
print('............................................................................')
print('train disc_mae = %.4f, train disc_std = %.4f' % (train_disc_mae, train_disc_std))
print('val disc_mae = %.4f, val disc_std = %.4f' % (val_disc_mae, val_disc_std))
print('............................................................................')
print('train vb_mae = %.4f, train vb_std = %.4f' % (train_vb_mae, train_vb_std))
print('val vb_mae = %.4f, val vb_std = %.4f' % (val_vb_mae, val_vb_std))
print('............................................................................')
print('train total_mae = %.4f, train total_std = %.4f' % (train_total_mae, train_total_std))
print('val total_mae = %.4f, val total_std = %.4f' % (val_total_mae, val_total_std))
print('............................................................................')
best_val_disc = np.min(np.mean(total_act_val_err[:, :15], axis=1))
best_val_vb = np.min(np.mean(total_act_val_err[:, 15:], axis=1))
best_val_total = np.min(np.mean(total_act_val_err, axis=1))
print('best val disc_mae = %.4f' % best_val_disc)
print('best val vb_mae = %.4f' % best_val_vb)
print('best val total_mae = %.4f' % best_val_total)
worst_val_disc = np.max(np.mean(total_act_val_err[:, :15], axis=1))
worst_val_vb = np.max(np.mean(total_act_val_err[:, 15:], axis=1))
worst_val_total = np.max(np.mean(total_act_val_err, axis=1))
print('worst val disc_mae = %.4f' % worst_val_disc)
print('worst val vb_mae = %.4f' % worst_val_vb)
print('worst val total_mae = %.4f' % worst_val_total)
|
[
"[email protected]"
] | |
d469a66212b1827ea7a58d057836e8a4a912d946
|
a00054cc4bc93c7cc6b371a4ab8ce5aeb80eb271
|
/a0/a0.py
|
89566a3fa9c550b5809001311ab796499406b203
|
[] |
no_license
|
yrzhou0120berk/w266
|
707e5317e18b3dd87d1078d17415bdcb00ef297c
|
721e149922ec4dbc8abeb43e8702b2b2d2ab367e
|
refs/heads/master
| 2020-03-16T16:38:37.136839 | 2018-05-09T18:31:23 | 2018-05-09T18:31:23 | 132,797,353 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,392 |
py
|
# coding: utf-8
# # Assignment 0
#
# This notebook will help verify that you're all set up with the Python packages we'll be using this semester.
#
# **Your task:** just run the cells below, and verify that the output is as expected. If anything looks wrong, weird, or crashes, update your Python installation or contact the course staff. We don't want library issues to get in the way of the real coursework!
# In[1]:
# Version checks
import importlib
def version_greater_equal(v1, v2):
for x, y in zip(v1.split('.'), v2.split('.')):
if int(x) != int(y):
return int(x) > int(y)
return True
assert version_greater_equal('1.2.3', '0.1.1')
assert version_greater_equal('1.2.3', '0.5.1')
assert version_greater_equal('1.2.3', '1.2.3')
assert version_greater_equal('0.22.0', '0.20.3')
assert not version_greater_equal('1.1.1', '1.2.3')
assert not version_greater_equal('0.5.1', '1.2.3')
assert not version_greater_equal('0.20.3', '0.22.0')
def version_check(libname, min_version):
m = importlib.import_module(libname)
print ("%s version %s is " % (libname, m.__version__))
print ("OK"
if version_greater_equal(m.__version__, min_version)
else "out-of-date. Please upgrade!")
version_check("numpy", "1.13.3")
version_check("matplotlib", "2.1")
version_check("pandas", "0.22")
version_check("nltk", "3.2.5")
version_check("tensorflow", "1.8")
# ## TensorFlow
#
# We'll be using [TensorFlow](tensorflow.org) to build deep learning models this semester. TensorFlow is a whole programming system in itself, based around the idea of a computation graph and deferred execution. We'll be talking a lot more about it in Assignment 1, but for now you should just test that it loads on your system.
#
# Run the cell below; you should see:
# ```
# Hello, TensorFlow!
# 42
# ```
# In[2]:
import tensorflow as tf
hello = tf.constant("Hello, TensorFlow!")
sess = tf.Session()
print(sess.run(hello))
a = tf.constant(10)
b = tf.constant(32)
print(sess.run(a+b))
# ## NLTK
#
# [NLTK](http://www.nltk.org/) is a large compilation of Python NLP packages. It includes implementations of a number of classic NLP models, as well as utilities for working with linguistic data structures, preprocessing text, and managing corpora.
#
# NLTK is included with Anaconda, but the corpora need to be downloaded separately. Be warned that this will take up around 3.2 GB of disk space if you download everything! If this is too much, you can download individual corpora as you need them through the same interface.
#
# Type the following into a Python shell on the command line. It'll open a pop-up UI with the downloader:
#
# ```
# import nltk
# nltk.download()
# ```
#
# Alternatively, you can download individual corpora by name. The cell below will download the famous [Brown corpus](http://www.essex.ac.uk/linguistics/external/clmt/w3c/corpus_ling/content/corpora/list/private/brown/brown.html):
# In[3]:
import nltk
assert(nltk.download("brown")) # should return True if successful, or already installed
# Now we can look at a few sentences. Expect to see:
# ```
# The Fulton County Grand Jury said Friday an investigation of Atlanta's recent primary election produced `` no evidence '' that any irregularities took place .
#
# The jury further said in term-end presentments that the City Executive Committee , which had over-all charge of the election , `` deserves the praise and thanks of the City of Atlanta '' for the manner in which the election was conducted .
# ```
# In[4]:
from nltk.corpus import brown
# Look at the first two sentences
for s in brown.sents()[:2]:
print(" ".join(s))
print("")
# NLTK also includes a sample of the [Penn treebank](https://www.cis.upenn.edu/~treebank/), which we'll be using later in the course for parsing and part-of-speech tagging. Here's a sample of sentences, and an example tree. Expect to see:
# ```
# The top money funds are currently yielding well over 9 % .
#
# (S
# (NP-SBJ (DT The) (JJ top) (NN money) (NNS funds))
# (VP
# (VBP are)
# (ADVP-TMP (RB currently))
# (VP (VBG yielding) (NP (QP (RB well) (IN over) (CD 9)) (NN %))))
# (. .))
# ```
# In[5]:
assert(nltk.download("treebank")) # should return True if successful, or already installed
print("")
from nltk.corpus import treebank
# Look at the parse of a sentence.
# Don't worry about what this means yet!
idx = 45
print(" ".join(treebank.sents()[idx]))
print("")
print(treebank.parsed_sents()[idx])
# We can also look at the [Europarl corpus](http://www.statmt.org/europarl/), which consists of *parallel* text - a sentence and its translations to multiple languages. You should see:
# ```
# ENGLISH: Resumption of the session I declare resumed the session of the European Parliament adjourned on Friday 17 December 1999 , and I would like once again to wish you a happy new year in the hope that you enjoyed a pleasant festive period .
# ```
# and its translation into French and Spanish.
# In[6]:
assert(nltk.download("europarl_raw")) # should return True if successful, or already installed
print("")
from nltk.corpus import europarl_raw
idx = 0
print("ENGLISH: " + " ".join(europarl_raw.english.sents()[idx]))
print("")
print("FRENCH: " + " ".join(europarl_raw.french.sents()[idx]))
print("")
print("SPANISH: " + " ".join(europarl_raw.spanish.sents()[idx]))
|
[
"[email protected]"
] | |
8926a41249f5bf629df53010e4f7708af8c89efb
|
a69543dbb496cb05fdbadaf72aff9cc5f39cf073
|
/cracker3.py
|
9503293544fd986a5755c0fdec7bc344a3150df3
|
[] |
no_license
|
alghuried-ahod/py-breaking-docx-passwoed-
|
48bda54efa23f17aead3ed43fd9e06ce90b40586
|
bf19f2e28116a523a55a662851b4ddf2e76a5665
|
refs/heads/master
| 2016-08-12T06:30:50.920390 | 2016-02-21T22:30:58 | 2016-02-21T22:30:58 | 52,231,401 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,170 |
py
|
"""
Ahod Alghuried, D15123616
Mobile Forensics, Lab #3
This Python program has been designed to break the password of docx files.
To get the Windows extensions, you can download pywin32 from this link
( https://sourceforge.net/projects/pywin32/files/pywin32/Build%2520220/ ).
After downloading it, run this command in the terminal ( pip install python-docx ). Then you will be able to import the Windows library.
"""
import sys
import win32com.client
from win32com import client
# To make Word object
a = client.Dispatch("Word.Application")
filename= sys.argv[1]
# open the word list, which is available online, to get all possible passwords from it
password_file = open ( 'wordlist2.txt', 'r' )
passwords = password_file.readlines()
password_file.close()
passwords = [item.rstrip('\n') for item in passwords]
# write the correct password in this file
results = open('results.txt', 'w')
for password in passwords:
print(password)
try:
doc = a.Documents.Open(filename,True,True,True,PasswordDocument=password)
print "Success! Password is: " + password
results.write(password)
results.close()
sys.exit()
except:
print "Incorrect password"
pass
|
[
"[email protected]"
] | |
72acf8c8711eef2cf5d22862dd046ca6895b2303
|
a8412e4fe55924d407d51bca173bf2283086dd82
|
/signals_define.py
|
4e42105d820312e68269b223ce2fced193289e05
|
[] |
no_license
|
isabelangelo/SETI_2018
|
5f38a495ab88c9dc6a74f15e7459592664db4537
|
72dffea9308b4d2faa482d8f470f32f9bacc675e
|
refs/heads/master
| 2020-03-22T20:46:36.508024 | 2018-07-11T21:50:36 | 2018-07-11T21:50:36 | 140,630,175 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,062 |
py
|
gaussian = {'id':0,
'result_id': 0,
'peak_power': 0.0000000,
'mean_power': 0.0000000,
'time': 0.0000000000000000,
'ra': 0.0000000,
'dec': 0.0000000,
'q_pix': 0,
'freq': 0.0000000000000000,
'detection_freq': 0.0000000000000000,
'barycentric_freq': 0.0000000000000000,
'fft_len': 0,
'chirp_rate': 0.0000000,
'rfi_checked': 0,
'rfi_found': 0,
'reserved': 0,
'sigma': 0.0000000,
'chisqr': 0.0000000,
'null_chisqr': 0.0000000,
'score': 0.0000000,
'max_power': 0.0000000,
'pot': ''
}
pulse = {'id':0,
'result_id': 0,
'peak_power': 0.0000000,
'mean_power': 0.0000000,
'time': 0.0000000000000000,
'ra': 0.0000000,
'dec': 0.0000000,
'q_pix': 0,
'freq': 0.0000000000000000,
'detection_freq': 0.0000000000000000,
'barycentric_freq': 0.0000000000000000,
'fft_len': 0,
'chirp_rate': 0.0000000,
'rfi_checked': 0,
'rfi_found': 0,
'reserved': 0,
'period': 0.0000000,
'snr': 0.0000000,
'thresh': 0.0000000,
'score': 0.0000000,
'len_prof': 0,
'pot':''
}
spike = {'id':0,
'result_id': 0,
'peak_power': 0.0000000,
'mean_power': 0.0000000,
'time': 0.0000000000000000,
'ra': 0.0000000,
'dec': 0.0000000,
'q_pix': 0,
'freq': 0.0000000000000000,
'detection_freq': 0.0000000000000000,
'barycentric_freq': 0.0000000000000000,
'fft_len': 0,
'chirp_rate': 0.0000000,
'rfi_checked': 0,
'rfi_found': 0,
'reserved': 0,
}
autocorr = {'id':0,
'result_id': 0,
'peak_power': 0.0000000,
'mean_power': 0.0000000,
'time': 0.0000000000000000,
'ra': 0.0000000,
'dec': 0.0000000,
'q_pix': 0,
'delay': 0.0000000,
'freq': 0.0000000000000000,
'detection_freq': 0.0000000000000000,
'barycentric_freq': 0.0000000000000000,
'fft_len': 0,
'chirp_rate': 0.0000000,
'rfi_checked': 0,
'rfi_found': 0,
'reserved': 0,
}
triplet = {'id':0,
'result_id': 0,
'peak_power': 0.0000000,
'mean_power': 0.0000000,
'time': 0.0000000000000000,
'ra': 0.0000000,
'dec': 0.0000000,
'q_pix': 0,
'freq': 0.0000000000000000,
'detection_freq': 0.0000000000000000,
'barycentric_freq': 0.0000000000000000,
'fft_len': 0,
'chirp_rate': 0.0000000,
'rfi_checked': 0,
'rfi_found': 0,
'reserved': 0,
'period': 0.0000000
}
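# Note (added): these dicts appear to serve as zero-valued row templates for the
# signal tables. A concrete record can be built by copying one and overriding
# fields, e.g. (the values below are hypothetical):
#   row = dict(spike)
#   row.update(id=1, result_id=7, peak_power=3.2, freq=1420405751.77)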
|
[
"[email protected]"
] | |
667a6a1286fe0c8a7c4877e2d9a1aab0a9a79399
|
c3ff891e0e23c5f9488508d30349259cc6b64b4d
|
/python练习/django exercise/FormsDemo/first/views.py
|
ebcd835469f1f68f965b5e54504c5e8ab9bab17f
|
[] |
no_license
|
JacksonMike/python_exercise
|
2af2b8913ec8aded8a17a98aaa0fc9c6ccd7ba53
|
7698f8ce260439abb3cbdf478586fa1888791a61
|
refs/heads/master
| 2020-07-14T18:16:39.265372 | 2019-08-30T11:56:29 | 2019-08-30T11:56:29 | 205,370,953 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,089 |
py
|
from django.shortcuts import render, HttpResponse
from django.core.exceptions import ValidationError
from django.forms import widgets
from first.models import UserInfo
from django import forms
# Create your views here.
def register(request):
if request.method == "GET":
form = UserForm()
return render(request, "register.html", locals())
else:
print(request.POST)
form = UserForm(request.POST)
if form.is_valid():
            # data that passed validation
print(form.cleaned_data)
UserInfo.objects.create(**form.cleaned_data)
return HttpResponse("注册成功")
else:
            # data that failed validation
error_data = form.errors
return render(request, "register.html", locals())
class UserForm(forms.Form):
user = forms.CharField(max_length=7,
label="用户名",
error_messages={"required": "该字段不能为空"},
widget=widgets.TextInput(attrs={"class": "form-control"}))
pwd = forms.CharField(max_length=7,
label="密码",
error_messages={"required": "该字段不能为空"},
widget=widgets.PasswordInput(attrs={"class": "form-control"}))
email = forms.EmailField(min_length=5,
label="邮箱",
error_messages={"invalid": "邮箱格式错误", "required": "该字段不能为空"},
widget=widgets.EmailInput(attrs={"class": "form-control"}))
    def clean_user(self):
        """Check whether the username is already registered."""
val = self.cleaned_data.get("user")
if not UserInfo.objects.filter(user=val).first():
return val
else:
raise ValidationError("该用户名已经被注册")
def clean_pwd(self):
val = self.cleaned_data.get("pwd")
if val.isdigit():
raise ValidationError("密码不能为纯数字")
else:
return val
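# Note (added): Django invokes these clean_<fieldname>() hooks automatically when
# form.is_valid() runs, after the field's built-in validation; raising
# ValidationError here attaches the message to that field in form.errors.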
|
[
"[email protected]"
] | |
2fd8dbf6abe18275d82d901f3df3fe5125902e96
|
cf873f8d8ca5611ddba0ca307a394565045b99d9
|
/Code_Unblock_the_Block.py
|
8f12c27da132c1482f04036feaa50622e3642b7f
|
[] |
no_license
|
Cyrton/RushHour-Python
|
4c598552c8673558033674f0fb09cb8ef767f3e5
|
7fb81081c0acd3e1365a2ac707222efa6c755fc2
|
refs/heads/main
| 2023-04-17T21:21:03.496202 | 2021-04-23T14:38:33 | 2021-04-23T14:38:33 | 308,906,986 | 0 | 1 | null | 2021-04-23T14:38:34 | 2020-10-31T15:03:04 |
Python
|
UTF-8
|
Python
| false | false | 2,539 |
py
|
# -*- coding: utf-8 -*-
"""
@author: marti and cyril
"""
from tkinter import *
from pickle import dump, load
from fenetre_charger import *
from fenetre_jouer import *
#----------------------------------Main Window Program-----------------------------------------
# Function for the main window
def Fenetre_Principale(fenetre_P,canvas_jouer, canvas_classement,fenetre_destroy):
if fenetre_destroy == 0:
canvas_jouer.destroy()
if fenetre_destroy == 1:
canvas_classement.destroy()
    # Window layout (title, canvas, image, buttons)
canvas_general = Canvas(fenetre_P,width=900, height=800,bg='light yellow')
canvas_general.pack(side=TOP, fill=BOTH, expand=YES)
label = Label(canvas_general, text="Unblock The Block",font="Arial 20 italic underline",bg='light yellow',fg ='brown')
label.pack(padx=0,pady=20)
photo = PhotoImage(file="Image_Grille_page_de_garde_.gif")
canvas = Canvas(canvas_general,width=505, height=397,bg='white')
canvas.create_image(5, 5, anchor=NW, image=photo)
canvas.pack(padx=0,pady=5)
    # NORMAL or DISABLED
boutonQ = Button(canvas_general, text="QUITTER", bd =10, command = fenetre_P.destroy, font="Arial 16 bold",bg= 'tomato',activebackground='tomato3',relief="groove", width =20, height =1)
boutonC = Button(canvas_general, text="CLASSEMENT", bd =10,state = NORMAL, command = lambda: Fenetre_Charger(fenetre_P,Fenetre_Principale,canvas_general), font="Arial 16 bold",bg= 'sky blue',activebackground='sky blue',relief="groove", width = 20, height =1)
boutonJ = Button(canvas_general, text="JOUER", bd =10, command = lambda: Fenetre_Jouer(fenetre_P,Fenetre_Principale,canvas_general,NONE,0), font="Arial 16 bold",bg= '#66EC62',activebackground='light green',relief="groove", width =20, height =1)
boutonQ.pack(side = BOTTOM,padx=0, pady=20)
boutonC.pack(side = BOTTOM,padx=0, pady=0)
boutonJ.pack(side = BOTTOM,padx=0, pady=20)
fenetre_P.mainloop()
#----------------------------------------------------------------------------------------------
#----------------------------------Main Program-----------------------------------------
fenetre_P = Tk()
fenetre_P.minsize(width=900, height=800)
fenetre_P.maxsize(width=900, height=800)
fenetre_P['bg']='light yellow'
fenetre_P.title('UNBLOCK THE BLOCK')
nombreSauvegarde = 0
Fenetre_Principale(fenetre_P,NONE,NONE,NONE)
#----------------------------------------------------------------------------------------------
|
[
"[email protected]"
] | |
01bb1719a713b296a199496170f298ee3b30ab81
|
32d09d58d45b55f3032722ffa82432122287b5a9
|
/TE-1/PL-1/OSD/9. Uefi or Legacy/ueleg.py
|
7bdd7a6380e973329fe2e6740c3d3ddf3d1b6318
|
[
"Unlicense"
] |
permissive
|
adityajn105/College-Codes
|
013aa63710d5da71710f93d6aafc1417212e936d
|
f40e1eee53b951f2101981230fc72201081fd5f7
|
refs/heads/master
| 2021-09-11T14:19:25.053663 | 2018-04-08T19:49:13 | 2018-04-08T19:49:13 | 76,291,624 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 161 |
py
|
import os
if os.path.exists("/boot/efi") or os.path.exists("/boot/firmware/efi"):
print("System is booted to UEFI")
else:
print("System is booted to LEGACY")
|
[
"[email protected]"
] | |
a895ca4cd57fb5125091e7f2633fac2b96567c90
|
a996967a72b4ac0341c82635f2ca5fcb311b1080
|
/Programas com pygame/pygame/jogos/Pong-master/engine.py
|
546d9b3c36e9fe68b2a3f8a0d99e11fd6cd53660
|
[] |
no_license
|
Victormbg/Python
|
157eea29db78c3a17fa1670be3aad0e610dc8f14
|
8d7671486065dfcc3820e968fa7b50a42aa7b169
|
refs/heads/master
| 2022-06-27T03:36:00.166727 | 2022-06-14T23:51:16 | 2022-06-14T23:51:16 | 149,981,858 | 1 | 0 | null | 2020-01-28T21:02:15 | 2018-09-23T12:22:16 |
Python
|
UTF-8
|
Python
| false | false | 5,380 |
py
|
import random, sys
from math import *
from ball import *
from constants import *
from ai import *
"""
Desc file:
The main Engine class manages the logic of the game; it contains
global update() & draw() functions to update and draw the whole main game, and it initialises all classes.
The main method of the class is the main loop, which calls all the game logic.
The update() function contains all the update() methods of the game; the same goes for draw().
TODO: Edit the eventListener()*
Date: 30/05/2017
Author: A.Amine (Goutted)
"""
class Engine(object):
    def __init__(self):
        """Initialize the display and all game objects."""
self.screen = pg.display.get_surface()
self.Boundary = self.screen.get_rect()
self.clock = pg.time.Clock()
self.fps = FPS_MAX
self.dt = 1.0 # delta time isn't activated.
self.keys = pg.key.get_pressed()
self.done = False
self.color = GAME_COLOR
self.player = Paddle(pg.Rect(PL_PAD_X, PL_PAD_Y, PAD_W, PAD_H), 0, PAD_SPEED, self.color)
self.computer = Paddle(pg.Rect(AI_PAD_X, AI_PAD_Y, PAD_W, PAD_H), 0, PAD_SPEED + 1, self.color)
self.ball = Ball(pg.Rect(400, 300, BL_DIM, BL_DIM), BL_XVEL, BL_YVEL, self.color)
        # create a new Ai for the computer paddle: the ai interacts between the computer pad and the ball.
self.ai = Ai(self.ball, self.computer)
# load relative resources to this class.
self.my_rc = Resources()
self.font = self.my_rc.loadFont('fixedsys.ttf', 55)
self.fpstext = self.my_rc.loadFont('arial.ttf', 15)
self.dashline = self.my_rc.loadImage('dash.png')
self.gamefx = self.my_rc.loadSound('gameloop.wav')
self.gamefx.set_volume(0.5)
# menu / game event listener
self.menuInfo = False
# display message you win/ loose to player.
self.winnerMsg = ''
def update(self):
        # Update the player, computer, pressed keys and ball; everything is updated here.
self.keys = pg.key.get_pressed()
self.ball.update(self.dt, self.player.shape, self.computer.shape)
self.player.update(self.dt)
self.ai.update(self.dt)
self.updateScore()
# then check for winner
self.winnerCheck()
def updateScore(self):
# for each update we see if there's a winner, and add +1pts for him.
        if self.ball.winner == 'player':
            self.player.score += 1
            self.ball.winner = 'none'
        elif self.ball.winner == 'ai':
            self.computer.score += 1
            self.ball.winner = 'none'
def quitGame(self):
pg.quit()
sys.exit()
def winnerCheck(self):
if self.player.score == MAX_SCORE:
self.winnerMsg = 'You Win !'
self.player.score = 0
self.computer.score = 0
self.done = True
self.menuInfo = False
elif self.computer.score == MAX_SCORE:
self.winnerMsg = 'Game Over'
self.player.score = 0
self.computer.score = 0
self.done = True
self.menuInfo = False
else:
self.winnerMsg ='Paused..'
def eventListener(self):
# the event keys to interact with the player.
for event in pg.event.get():
if event.type == pg.QUIT:
self.quitGame()
elif self.keys[pg.K_ESCAPE]:
self.quitGame()
elif self.keys[pg.K_SPACE]:
self.done = True
self.menuInfo = False
elif event.type == pg.KEYDOWN:
if event.key == pg.K_UP:
self.player.side = -1
elif event.key == pg.K_DOWN:
self.player.side = 1
elif event.key == pg.K_p:
self.gamefx.stop()
self.gamefx.play(-1)
elif event.key == pg.K_s:
self.gamefx.stop()
elif event.key == pg.K_F10:
self.switchFpsMode()
else:
self.player.side = 0
# enable disable fps
def switchFpsMode(self):
if not self.fps:
self.fps = FPS_MAX
self.dt = 1.0
else:
self.fps = 0
self.dt = FPS_MAX/self.clock.get_fps()
def draw(self):
# Draw all necessary objects to the level.
self.screen.fill(BACkGROUND_COLOR)
self.screen.blit(self.dashline, (425, 0))
self.player.draw()
self.computer.draw()
self.ball.draw()
self.my_rc.printFont(self.font, str(self.computer.score), AI_SCORE_POS)
self.my_rc.printFont(self.font, str(self.player.score), PL_SCORE_POS)
self.my_rc.printFont(self.fpstext, "fps: " + str(self.clock.get_fps()), (760, 5))
def mainLoop(self, menuInfo):
# play some fx sound
self.done = menuInfo
self.gamefx.play(-1)
# the main loop of the level.
while not self.done:
self.eventListener()
self.update()
self.draw()
pg.display.update()
self.clock.tick(self.fps)
self.gamefx.stop()
|
[
"[email protected]"
] | |
b41ea892a7093a5b363d79919a66fa1c446ea822
|
4de0321d3114750ba200bdbb73cf4ad8d1e0efdb
|
/spike_swarm_sim/actuators/led_actuator.py
|
cb9524ec9810621a43bb03214a87e38349332ef2
|
[
"MIT"
] |
permissive
|
r-sendra/SpikeSwarmSim
|
525f575eb8e14d7abfc70cb6757299f7b50cf589
|
a5bd71cb93df0963588640c5d44b3891fa07457c
|
refs/heads/main
| 2023-02-19T11:12:43.195818 | 2021-01-22T12:30:26 | 2021-01-22T12:30:26 | 328,186,128 | 0 | 0 | null | 2021-01-21T16:24:47 | 2021-01-09T15:32:15 |
Python
|
UTF-8
|
Python
| false | false | 304 |
py
|
from spike_swarm_sim.register import actuator_registry
@actuator_registry(name='led_actuator')
class LedActuator:
""" LED actuator that turns on or off the LED depending on
the action. """
def __init__(self):
self.on = 0
def step(self, action):
self.on = action
|
[
"[email protected]"
] | |
044981268b892180d97eb9fca6a3211510a808a1
|
2560ab2e2d90cf259a84de8eeedd9f1953e8943b
|
/aur.py
|
8aea9176c262176a9e2abf2d9ce08c4cd47467ce
|
[] |
no_license
|
ryantaylor/nas
|
6b17776979e2f235040aa31a485da5a6867db881
|
5aaeb539a0b50a357d5ed5080c95117e6ab5a6e0
|
refs/heads/master
| 2022-11-19T03:44:44.000137 | 2022-11-06T15:58:33 | 2022-11-06T15:58:33 | 143,469,834 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 980 |
py
|
import os
import subprocess
from pushover import Pushover
def check_updates(application, pushover):
application = application.strip()
os.chdir(f"{os.environ['AUR_DIR']}/{application}")
subprocess.run(['git', 'fetch'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
local_result = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
upstream_result = subprocess.run(['git', 'rev-parse', '@{u}'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if local_result.stdout.decode('UTF-8') != upstream_result.stdout.decode('UTF-8'):
pushover.send(title='Update Available!', message=f'{application} can be updated!')
return True
return False
def main():
pushover = Pushover(os.environ['PUSHOVER_AUR_TOKEN'], os.environ['PUSHOVER_USER_KEY'])
for application in os.environ['AUR_APPLICATIONS'].split(','):
check_updates(application, pushover)
if __name__ == '__main__':
main()
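# Example environment (added; a hypothetical sketch, values are placeholders).
# All four variables are read by the code above:
#   AUR_DIR=/home/user/aur            # parent directory holding one git clone per AUR package
#   AUR_APPLICATIONS=yay,spotify      # comma-separated package directories under AUR_DIR
#   PUSHOVER_AUR_TOKEN=<app token>    # Pushover application token
#   PUSHOVER_USER_KEY=<user key>      # Pushover user key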
|
[
"[email protected]"
] | |
66eb41b497be5f43356f205ce49307cd2e618a2e
|
63ba933a294865f65409635f62e0f1d59f725f37
|
/src/linkedLists/flatten.py
|
4b148dc9198652a6473dde5fda7746be4087ad87
|
[
"CC0-1.0"
] |
permissive
|
way2arun/datastructures_algorithms
|
fc4302bdbb923ef8912a4acf75a286f2b695de2a
|
4ea4c1579c28308455be4dfa02bd45ebd88b2d0a
|
refs/heads/master
| 2021-12-07T04:34:35.732026 | 2021-09-30T12:11:32 | 2021-09-30T12:11:32 | 203,658,808 | 1 | 0 | null | 2020-08-08T15:55:09 | 2019-08-21T20:23:46 |
Python
|
UTF-8
|
Python
| false | false | 3,212 |
py
|
"""
https://leetcode.com/explore/challenge/card/july-leetcoding-challenge/545/week-2-july-8th-july-14th/3386/
You are given a doubly linked list which in addition to the next and previous pointers, it could have a child pointer, which may or may not point to a separate doubly linked list. These child lists may have one or more children of their own, and so on, to produce a multilevel data structure, as shown in the example below.
Flatten the list so that all the nodes appear in a single-level, doubly linked list. You are given the head of the first level of the list.
Example 1:
Input: head = [1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]
Output: [1,2,3,7,8,11,12,9,10,4,5,6]
Explanation:
The multilevel linked list in the input is as follows:
After flattening the multilevel linked list it becomes:
Example 2:
Input: head = [1,2,null,3]
Output: [1,3,2]
Explanation:
The input multilevel linked list is as follows:
1---2---NULL
|
3---NULL
Example 3:
Input: head = []
Output: []
How multilevel linked list is represented in test case:
We use the multilevel linked list from Example 1 above:
1---2---3---4---5---6--NULL
|
7---8---9---10--NULL
|
11--12--NULL
The serialization of each level is as follows:
[1,2,3,4,5,6,null]
[7,8,9,10,null]
[11,12,null]
To serialize all levels together we will add nulls in each level to signify no node connects to the upper node of the previous level. The serialization becomes:
[1,2,3,4,5,6,null]
[null,null,7,8,9,10,null]
[null,11,12,null]
Merging the serialization of each level and removing trailing nulls we obtain:
[1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]
Constraints:
Number of Nodes will not exceed 1000.
1 <= Node.val <= 10^5
"""
# Definition for a Node.
class Node:
def __init__(self, val, prev, next, child):
self.val = val
self.prev = prev
self.next = next
self.child = child
class Solution:
def flatten(self, head: 'Node') -> 'Node':
# Solution 1 - 36 ms
"""
if not head:
return head
order = []
stack = [head]
while stack:
curr = stack.pop()
order.append(curr)
if curr.next:
stack.append(curr.next)
if curr.child:
stack.append(curr.child)
curr.child = None
for i in range(len(order) - 1):
order[i].next = order[i + 1]
order[i + 1].prev = order[i]
return order[0]
"""
# Solution 2
pointer = head
branches = []
while pointer:
if pointer.child:
if pointer.next: branches.append(pointer.next)
pointer.next = pointer.child
pointer.child = None
pointer.next.prev = pointer
elif not pointer.next and len(branches) > 0:
pointer.next = branches.pop()
pointer.next.prev = pointer
pointer = pointer.next
return head
# Main Call: build the multilevel list from Example 2 above (1---2, with 3 as the child of node 1)
node1 = Node(1, None, None, None)
node2 = Node(2, node1, None, None)
node3 = Node(3, None, None, None)
node1.next = node2
node1.child = node3
solution = Solution()
flat = solution.flatten(node1)
while flat:
    print(flat.val)  # expected output order: 1, 3, 2
    flat = flat.next
|
[
"[email protected]"
] | |
a6f371f9a7b5daf0f563373f301f2318be7fd87f
|
9d25837a05eaa9d32b7b88ab1dd133ab28dba0a2
|
/robot02_pwm.py
|
6bdc34684856ee5b0afbb6b66706048430896736
|
[] |
no_license
|
DarkMaguz/Python-pi
|
2d2d14c061c9a4437e68e5755bc49fc4b7a0a76e
|
34b49c5f4f26abc46d29c9f45fcccc954cd8a679
|
refs/heads/master
| 2020-04-16T23:47:08.963626 | 2018-04-08T13:43:55 | 2018-04-08T13:43:55 | 49,954,812 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,344 |
py
|
# -*- encoding: latin-1 -*-
import RPi.GPIO as GPIO
import time
import spidev
# Initialise a new SPI object.
spi = spidev.SpiDev()
# Connect to the specific SPI device, at "/dev/spidev0.0".
spi.open(0, 0) # Open port 0, device 0.
# Set the threshold for when we can say, with high probability, that we are on a line.
graenseVaerdi = 50
# The direction the robot last turned. 0 for left and 1 for right.
sidsteRetning = 1
# Standard duty cycle.
stdDC = 40
# Configure the Raspberry Pi's GPIO.
# Tell it how we interpret the GPIO pin numbering.
GPIO.setmode(GPIO.BOARD)
# Make a list containing the pins used for the motors.
motorPins = [11, 12, 15, 16]
# Initialise a dict to hold the PWM objects.
pwm = {}
# Set the pin numbers in "motorPins" to output.
for pin in motorPins:
GPIO.setup(pin, GPIO.OUT)
    # Make sure to switch off before we switch on, so the robot does not run away from us.
GPIO.output(pin, 0)
    # Initialise PWM on "pin" at 50Hz.
pwm[pin] = GPIO.PWM(pin, 50)
    # Set the duty cycle to 0, so the robot does not run away from us.
pwm[pin].start(0)
# Make a list of tuples for each motor operation.
if True:
stop = [(11, 0), (12, 0), (15, 0), (16, 0)]
tilbage = [(12, 1), (15, 1)]
frem = [(11, 1), (16, 1)]
hoejre = [(11, 1), (15, 1)]
venstre = [(12, 1), (16, 1)]
else:
stop = "stop"
frem = "frem"
tilbage = "tilbage"
hoejre = "højre"
venstre = "venstre"
def robotDoPWM(pin, tilstand):
dc = stdDC if tilstand else 0
pwm[pin].ChangeDutyCycle(dc)
# Send a signal to the L293D driver IC about which direction the robot should take.
def robotDo(opperationer):
#print opperationer
for opperation in opperationer:
#GPIO.output(*opperation)
robotDoPWM(*opperation)
# Fetch SPI data from the MCP3008 chip.
def hentData(kanal):
adc = spi.xfer2([1,(8+kanal)<<4,0])
data = ((adc[1]&3) << 8) + adc[2]
return data
# Return "true" if the sensor is over the line, otherwise return "false".
def erPaaStregen():
return hentData(0) > graenseVaerdi
# Switch the direction in which the robot should search for the line.
# It will always be the opposite of the last direction.
def nyRetning():
global sidsteRetning
if sidsteRetning == 1:
sidsteRetning = 0
robotDo(venstre)
else:
sidsteRetning = 1
robotDo(hoejre)
# Resume the last direction.
def genoptag():
if sidsteRetning == 1:
robotDo(hoejre)
else:
robotDo(venstre)
def onExit():
robotDo(stop)
    # Stop PWM on all pins.
for pin in motorPins:
pwm[pin].stop()
    # Reset the GPIO settings.
GPIO.cleanup()
# Execution description:
# 1) Turn right until the line is under the sensor.
# 2) Drive straight ahead as long as the line is under the sensor.
# 3) If the line disappears from under the sensor, turn in the opposite direction of the previous turn.
# 4) Repeat 2 - 3.
# Catch exceptions.
try:
robotDo(hoejre)
while True:
        # Wait until the sensor is over the line.
while not erPaaStregen():
genoptag()
time.sleep(0.01)
robotDo(stop)
        # Now the sensor is over the line, so we should drive forward.
robotDo(frem)
        # Drive forward as long as the sensor is over the line.
while (erPaaStregen()):
time.sleep(0.1)
        # The sensor is now no longer over the line, so we switch direction.
nyRetning()
# On Ctrl+C the "KeyboardInterrupt" exception is caught.
except KeyboardInterrupt:
onExit()
finally:
onExit()
onExit()
|
[
"[email protected]"
] | |
182d2c133c867b48df3b915ff9cc056dcdba61d5
|
f03e50ab105c8dd97bda374fa2d604d480b85fb3
|
/apps/projects/models.py
|
ca45800eb9e00a783cdea3dae4a0abef2f2c4541
|
[] |
no_license
|
callowayproject/callowaysite
|
9717b7d934ef142b5e6b8fa1e0c93651382198bb
|
eb25d208586a7dc9ffb88660b07ad942ba9fe231
|
refs/heads/master
| 2022-12-15T11:38:57.787801 | 2019-07-14T13:21:13 | 2019-07-14T13:21:13 | 730,944 | 1 | 0 | null | 2022-11-22T00:40:56 | 2010-06-20T19:50:00 |
CSS
|
UTF-8
|
Python
| false | false | 1,511 |
py
|
import datetime
from django.db import models
from django.core.files.images import get_image_dimensions
from projects.settings import LOGO_STORAGE, PROJECT_TYPES, STATUSES
class Project(models.Model):
"""Something that we work on"""
name = models.CharField(blank=True, max_length=255)
description = models.TextField(blank=True)
code_url = models.CharField(blank=True, max_length=255)
docs_url = models.CharField(blank=True, max_length=255)
logo = models.FileField(blank=True, upload_to='projects/logos', storage=LOGO_STORAGE())
logo_width = models.IntegerField(editable=False, blank=True, null=True)
logo_height = models.IntegerField(editable=False, blank=True, null=True)
is_fork = models.BooleanField(default=False)
why_forked = models.TextField(blank=True, null=True)
external_id = models.IntegerField(blank=True, null=True)
project_type = models.IntegerField(choices=PROJECT_TYPES, default=2)
status = models.IntegerField(choices=STATUSES, default=0)
updated = models.DateTimeField(editable=False, default=datetime.datetime.now)
class Meta:
ordering = ('name', )
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
        if self.logo:
            width, height = get_image_dimensions(self.logo.file, close=True)
        else:
            width, height = None, None
        # store the measured dimensions on the model's own width/height fields
        self.logo_width = width
        self.logo_height = height
        super(Project, self).save(*args, **kwargs)
|
[
"[email protected]"
] | |
3f0334e74a172b28d97ef4fe5641f86b7070ca66
|
9426f2e4f25c85c351a4d1b8855fe7d4cfd35210
|
/fardel_ecommerce/order/models.py
|
43289b3f939406b8fb0777a2648784f5577f747c
|
[] |
no_license
|
FardelCMS/fardel_ecommerce
|
52e4eaebb243c863f0dd6af22be093f4c90af8cd
|
d4221a7f4f7812d3e491234fc4cca6b828665ae3
|
refs/heads/master
| 2021-08-01T01:52:22.809056 | 2021-07-29T09:58:11 | 2021-07-29T09:58:11 | 229,290,203 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,175 |
py
|
import datetime
from ..checkout.models import Cart, CartLine
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSONB, UUID
from flask_sqlalchemy import BaseQuery
from flask_jwt_extended import current_user
from fardel_ecommerce.product.models import ProductVariant
from fardel.ext import db
class Order(db.Model):
__tablename__ = "orders"
"""
Status Types:
:Fulfiled:
:Unfulfiled:
:Canceled:
:Done:
"""
id = db.Column(db.Integer, primary_key=True, index=True)
status = db.Column(db.String(64), default="Unfulfiled")
user_id = db.Column(db.Integer, db.ForeignKey('auth_users.id'))
address_id = db.Column(db.Integer, db.ForeignKey('auth_users_address.id'))
create_time = db.Column(db.TIMESTAMP, default=func.current_timestamp())
total = db.Column(db.Integer, default=0)
quantity = db.Column(db.Integer, default=0)
data = db.Column(JSONB())
user = db.relationship("User")
address = db.relationship("UserAddress")
lines = db.relationship("OrderLine")
@staticmethod
def create_from_cart(cart_id, address_id):
cart = Cart.query.filter_by(token=cart_id).first()
if current_user.id == cart.user_id:
order = Order(
user_id=cart.user_id,
total=cart.total,
quantity=cart.quantity,
address_id=address_id,
data=cart.checkout_data
)
db.session.add(order)
db.session.commit()
for line in cart.lines:
order_line = OrderLine(
order_id=order.id,
variant_id=line.variant_id,
quantity=line.quantity,
total=line.get_total(),
data=line.data
)
db.session.add(order_line)
cart.clear()
db.session.flush()
return order
else:
return None
@property
def is_shipping_required(self):
"""Return `True` if any of the lines requires shipping."""
if not hasattr(self, '_is_shipping_required'):
self._is_shipping_required = False
for line in self.lines:
if line.variant.is_shipping_required:
self._is_shipping_required = True
break
return self._is_shipping_required
def delete_line(self, variant_id, data):
""" Delete a line with specified variant_id+data """
line = self.get_line(variant_id, data)
line.delete()
def set_fulfiled(self):
for line in self.lines:
line.variant.quantity_allocated = ProductVariant.quantity_allocated + line.quantity
self.status = "Fulfiled"
db.session.flush()
def dict(self):
""" Serialize object to json """
return {
'id': self.id,
'status': self.status,
'address': self.address.dict(),
'total': self.total,
'quantity': self.quantity,
'lines': [line.dict() for line in self.lines],
'is_shipping_required': self.is_shipping_required,
}
class OrderLine(db.Model):
__tablename__ = "order_lines"
id = db.Column(db.Integer, primary_key=True, index=True)
order_id = db.Column(db.ForeignKey('orders.id'))
variant_id = db.Column(db.Integer,
db.ForeignKey('product_product_variants.id', ondelete="CASCADE"))
total = db.Column(db.Integer)
quantity = db.Column(db.Integer)
data = db.Column(JSONB(), default={})
variant = db.relationship("ProductVariant")
order = db.relationship("Order", overlaps="lines")
def dict(self):
return {
'id': self.id,
'variant': self.variant.dict(cart=True),
'quantity': self.quantity,
'data': self.data,
'total': self.total,
'is_shipping_required': self.is_shipping_required
}
@property
def is_shipping_required(self):
return self.variant.is_shipping_required
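# Usage sketch (added; hypothetical, assuming an authenticated request context so
# that `current_user` is set, plus an existing cart token and address id):
#   order = Order.create_from_cart(cart_token, address_id)
#   order.set_fulfiled()          # allocates variant quantities, marks status "Fulfiled"
#   payload = order.dict()        # JSON-serialisable summary for the API layer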
|
[
"[email protected]"
] | |
39765aad0f84ce97c089987f6a920f1900d8407c
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/tools/CrossStackProfiler/CspReporter.py
|
7ae672a2e99fd3f8e3f64c223b2fc2c9a0b3ecf5
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 |
Apache-2.0
| 2023-09-14T19:20:51 | 2016-08-15T06:59:08 |
C++
|
UTF-8
|
Python
| false | false | 8,467 |
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
from multiprocessing import Process
from CspFileReader import (
DCGM_PATH,
FILEORGANIZEFORM_BYRANK,
FILEORGANIZEFORM_BYTRAINER,
NET_PATH,
PROFILE_PATH,
TIME_PATH,
getLogger,
)
from DCGMFileReader import dcgmFileReader
from ProfileFileReader import profileFileReader
def get_argparse():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--profile_path',
type=str,
default='.',
help='Working path that store the monitor data.',
)
parser.add_argument(
'--timeline_path',
type=str,
default='.',
help='Output timeline file name.',
)
parser.add_argument(
'--gpuPerTrainer', type=int, default=8, help='Gpus per trainer.'
)
parser.add_argument(
'--trainerNum', type=int, default=4, help='Num of trainer.'
)
parser.add_argument(
'--groupSize', type=int, default=8, help='Num of trainer in a group.'
)
parser.add_argument(
'--displaySize',
type=int,
default=2,
help='Num of line need to display in a group.',
)
return parser.parse_args()
class CspReporter:
def __init__(self, args):
self._args = args
print(self._args)
self._workPath = self._args.profile_path
self._saveFilePath = self._args.timeline_path
self._gpuPerTrainer = self._args.gpuPerTrainer
self._groupSize = self._args.groupSize
self._displaySize = self._args.displaySize
self._trainerNum = self._args.trainerNum
self._checkArgs()
self._init_logger()
self._init_timeInfo()
self._init_reader()
def _checkArgs(self):
if self._trainerNum % self._groupSize != 0:
raise Exception(
"Input args error: trainerNum[%d] %% groupSize[%d] != 0"
% (self._trainerNum, self._groupSize)
)
def _init_logger(self):
self._logger = getLogger()
def _init_reader(self):
self._dcgmPath = os.path.join(self._workPath, DCGM_PATH)
self._netPath = os.path.join(self._workPath, NET_PATH)
self._profilePath = os.path.join(self._workPath, PROFILE_PATH)
self._netFileReaderArgs = {
"dataPath": self._netPath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYTRAINER,
}
self._dcgmFileReaderArgs = {
"dataPath": self._dcgmPath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYTRAINER,
}
self._profileFileReaderArgs = {
"dataPath": self._profilePath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYRANK,
}
self._dcgmFileReader = dcgmFileReader(
self._logger, self._dcgmFileReaderArgs
)
self._profileFileReader = profileFileReader(
self._logger, self._profileFileReaderArgs
)
def _init_timeInfo(self):
self._timePath = os.path.join(self._workPath, TIME_PATH)
self._timeInfo = {}
self._minTimeStamp = 0
self._set_timeInfo()
def _set_timeInfo(self, timeFileNamePrefix="time.txt", sed="."):
timeFileNameList = glob.glob(
os.path.join(self._timePath, timeFileNamePrefix, sed, "*")
)
for timeFileName in timeFileNameList:
trainerId = int(timeFileName.split(sed)[-1])
gpuId = int(timeFileName.split(sed)[-2])
info = {}
with open(timeFileName, "r") as rf:
for line in rf:
if line.startswith("start time:"):
info["start_time"] = int(
float(line.split(":")[-1]) * 1e9
)
self._minTimeStamp = min(
self._minTimeStamp, info["start_time"]
)
if line.startswith("end time:"):
info["end_time"] = int(float(line.split(":")[-1]) * 1e9)
if not info:
self._timeInfo[gpuId * trainerId] = info
def _generateTraceFileByGroupAndGpuId(
self, pipileInfo, netInfo, groupId, gpuId
):
dcgmInfoDict = self._dcgmFileReader.getDcgmInfoDict(groupId, gpuId)
opInfoDict = self._profileFileReader.getOpInfoDict(groupId, gpuId)
traceObj = {}
traceObj["traceEvents"] = (
pipileInfo[str(gpuId)]
+ opInfoDict["traceEvents"]
+ dcgmInfoDict["traceEvents"]
+ netInfo["traceEvents"]
)
self._profileFileReader.dumpDict(
traceObj, "traceFile", groupId, gpuId, False, self._saveFilePath
)
def _generateTraceFileByGroup(self, groupId, processNum):
# first we need to generate pipeline info
pipileInfo = self._profileFileReader.getPipeLineInfo(
groupId, processNum
)
# second we need to generate dcgm info
dcgmInfo = self._dcgmFileReader.getDCGMTraceInfo(groupId, processNum)
# third we need to generate net info
netInfo = {}
netInfo["traceEvents"] = []
# netInfo = self._netFileReader.parseFileByGroup(groupId, processNum)
        # fourth we need to generate op info
opInfo = self._profileFileReader.getOPTraceInfo(groupId)
        # finally we dump this information to disk
processPool = []
pidList = []
for gpuId in range(self._gpuPerTrainer):
subproc = Process(
target=self._generateTraceFileByGroupAndGpuId,
args=(
pipileInfo,
netInfo,
groupId,
gpuId,
),
)
processPool.append(subproc)
subproc.start()
pidList.append(subproc.pid)
self._logger.info(
"[traceFile]: process [%d] has been started, total task num is %d ..."
% (subproc.pid, 1)
)
for t in processPool:
t.join()
pidList.remove(t.pid)
self._logger.info(
"[traceFile]: process [%d] has exited! remained %d process!"
% (t.pid, len(pidList))
)
def generateTraceFile(self, processNum=8):
processPool = []
pidList = []
        for groupId in range(self._trainerNum // self._groupSize):
subproc = Process(
target=self._generateTraceFileByGroup,
args=(
groupId,
processNum,
),
)
processPool.append(subproc)
subproc.start()
pidList.append(subproc.pid)
self._logger.info(
"[GroupTraceFile]: process [%d] has been started, total task num is %d ..."
% (subproc.pid, 1)
)
for t in processPool:
t.join()
pidList.remove(t.pid)
self._logger.info(
"[GroupTraceFile]: process [%d] has exited! remained %d process!"
% (t.pid, len(pidList))
)
if __name__ == '__main__':
args = get_argparse()
tl = CspReporter(args)
tl.generateTraceFile()
|
[
"[email protected]"
] | |
e881c66dc6ff89dbb21016ef1bd13a9edd91ae75
|
5e6155ad914b8bf67c0b8eb07baf12dccdcc0dbb
|
/data_sources/technology.py
|
b43b8372fa35160adc63a84bfee7ee1a5839ee79
|
[
"Apache-2.0"
] |
permissive
|
fabio-pintodacosta/gu-email-renderer
|
08cc85b06580768ba89c262f1633ffcbbfbf02a5
|
f9f62f6e8048cc0a6f79df734b6217f3dbcec8fa
|
refs/heads/master
| 2020-12-27T21:16:41.510316 | 2015-11-03T13:36:57 | 2015-11-03T13:36:57 | 39,622,245 | 0 | 0 | null | 2015-07-24T09:02:11 | 2015-07-24T09:02:11 | null |
UTF-8
|
Python
| false | false | 1,272 |
py
|
from data_source import DataSource, ItemDataSource, SearchDataSource
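# Each data source below pins the query used for the technology email: a content id,
# optional tags / editors' picks / most-viewed flags, and the elements to show. The
# actual fetching lives in the data_source base classes (not shown in this file).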
class TechnologyDataSource(ItemDataSource):
def __init__(self, client):
ItemDataSource.__init__(self, client, 'technology', show_editors_picks=True)
self.name = 'technology' + client.edition
self.show_tags = ['keyword']
self.show_elements = 'image'
def __repr__(self):
return str(self.__class__) + self.name
class TechnologyMostViewedDataSource(ItemDataSource):
def __init__(self, client):
ItemDataSource.__init__(self, client, content_id='technology', show_most_viewed=True)
class TechnologyGamesDataSource(ItemDataSource):
def __init__(self, client):
ItemDataSource.__init__(self, client, content_id='technology/games')
self.tags = ['-technology/series/chatterbox,-type/video']
class TechnologyPodcastDataSource(ItemDataSource):
def __init__(self, client):
ItemDataSource.__init__(self, client, content_id='technology')
self.tags = ['type/podcast']
class TechnologyVideoDataSource(SearchDataSource):
def __init__(self, client):
DataSource.__init__(self, client)
self.content_type = 'video'
self.tags = ['technology/technology']
self.show_elements = 'video'
|
[
"[email protected]"
] | |
03c9b8c1400c21f8f1f1f697eace517cba3fabce
|
f0b75bd94f133a13f469f429a696f26be3be9862
|
/week 2/.history/python_second_assignment_20200204154901.py
|
ca2a7780d18decaa9aca7b5410cab8eda6e90bd4
|
[] |
no_license
|
dechavez4/Python_handin_assignments
|
023350fabd212cdf2a4ee9cd301306dc5fd6bea0
|
82fd8c991e560c18ecb2152ea5a8fc35dfc3c608
|
refs/heads/master
| 2023-01-11T23:31:27.220757 | 2020-05-22T10:33:56 | 2020-05-22T10:33:56 | 237,179,899 | 0 | 0 | null | 2022-12-30T20:14:04 | 2020-01-30T09:30:16 |
Python
|
UTF-8
|
Python
| false | false | 1,447 |
py
|
# Exercise 1
# Create a python file with 3 functions:
# A. def print_file_content(file) that can print content of a csv file to the console
import csv
from sys import argv
import platform
filename = argv[1]
def print_file_content(file):
    with open(file) as csv_file:
content = csv_file.readlines()
for line in content[:20]:
print(line.strip().split(','))
# can overwrite the old file.
# B. def write_list_to_file(output_file, lst) that can take a list of tuple and write each element to a new line in file
def write_list_to_file(output_file, *lst):
    if platform.system() == 'Windows':
        newline = ''
    else:
        newline = None
    with open(output_file, 'w', newline=newline) as output_file:
output_writer = csv.writer(output_file)
for ele in lst:
output_writer.writerow(ele)
# C. def read_csv(input_file) that take a csv file and read each row into a list
def read_line(file):
with open(file) as file_object:
lines = file_object.readlines()
print(lines)
for line in lines:
print(line.rstrip())
def main():
if argv[2] == 'print_file_content':
print_file_content(filename)
if argv[2] == 'write_list_to_file':
inputfield = argv[3:]
write_list_to_file(filename, inputfield)
if argv[2] == 'read_line':
read_line(filename)
def run():
    main()
if __name__ == '__main__':
    run()
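# Example usage from the command line (a sketch; "data.csv" is just a placeholder
# for an existing CSV file, and the script name is whatever this file is saved as):
#   python python_second_assignment.py data.csv print_file_content
#   python python_second_assignment.py out.csv write_list_to_file one two three
#   python python_second_assignment.py data.csv read_line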
|
[
"[email protected]"
] | |
3f89fb97ec5363fc81efe42ce4a627e34436e809
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_suite.py
|
5d9059953940881ade58e572a6b7dde68f38bcfb
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 525 |
py
|
# class header
class _SUITE():
def __init__(self,):
self.name = "SUITE"
self.definitions = [u'a set of connected rooms, especially in a hotel: ', u'a set of furniture for one room, of matching design and colour: ', u'a piece of music with several parts, usually all in the same key', u'a set of related software (= computer program) products']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
517ceef7d532629cbd27d99cf7de9e4ef01c918a
|
27bdba388094658bd691d3f2947dc5bc265e8ddb
|
/frontend/website2/ownermanagement/ownermanagement/settings.py
|
cc136cd2b672194c5dda25482bcd171c55e17d17
|
[] |
no_license
|
ankithbala/blockchain
|
793042f67fce2c7cbc5808086204737f60aac21d
|
77fa2aab2fd201ad022672e96439112e2edb3cdd
|
refs/heads/master
| 2020-05-24T17:44:42.429929 | 2019-05-18T19:03:11 | 2019-05-18T19:03:11 | 187,393,377 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,307 |
py
|
"""
Django settings for ownermanagement project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yuska8hno9(w%oh7jh^m+$$omfn%=6d5fc-$*vusr7@xd93*4t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
'ownerportal',
'baseportal',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ownermanagement.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ownermanagement.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
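# Load extra project settings from a.json at import time; this file is assumed to hold
# the blockchain/contract configuration consumed elsewhere in the project.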
trufSettings = None
with open(os.path.join(BASE_DIR, "a.json")) as fp:
    trufSettings = json.loads(fp.read())
print(trufSettings)
|
[
"[email protected]"
] | |
1bdd34e88fd6277b360b09b84201d96e1a50fe44
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/143_15.py
|
8d67528a49ea0ab7b49f24cfcb96309e98a02750
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,669 |
py
|
Python | Check if string ends with any string in given list
While working with strings, their prefixes and suffixes often play an important role
in making decisions. For data-manipulation tasks, we may sometimes need to check
whether a string ends with any string from a given list. Let's discuss certain
ways in which this task can be performed.
**Method #1 : Using filter() + endswith()**
The combination of these two functions can perform this particular task: filter()
iterates over the candidate suffixes, and endswith() tests each one against the
target string.
# Python3 code to demonstrate
# Checking for string match suffix
# using filter() + endswith()
# initializing string
test_string = "GfG is best"
# initializing suffix list
suff_list = ['best', 'iss', 'good']
# printing original string
print("The original string : " + str(test_string))
# using filter() + endswith()
# Checking for string match suffix
res = list(filter(test_string.endswith, suff_list)) != []
# print result
print("Does string end with any suffix list sublist ? : " +
str(res))
**Output :**
The original string : GfG is best
Does string end with any suffix list sublist ? : True
**Method #2 : Using endswith()**
As an improvement over the above method, it is not necessary to involve filter()
at all. The task can be handled by endswith() alone, by supplying the suffixes
as a tuple argument.
# Python3 code to demonstrate
# Checking for string match suffix
# using endswith()
# initializing string
test_string = "GfG is best"
# initializing suffix list
suff_list = ['best', 'iss', 'good']
# printing original string
print("The original string : " + str(test_string))
# using endswith()
# Checking for string match suffix
res = test_string.endswith(tuple(suff_list))
# print result
print("Does string end with any suffix list sublist ? : " +
str(res))
**Output :**
The original string : GfG is best
Does string end with any suffix list sublist ? : True
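If the matching suffix itself is needed rather than just a True/False answer, the
same building blocks can be combined (a small extra sketch, not part of the
original methods above):
# using filter() + next()
# Getting the first suffix that the string actually ends with (None if no match)
matched_suffix = next(filter(test_string.endswith, suff_list), None)
print("The matching suffix : " + str(matched_suffix))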
|
[
"[email protected]"
] | |
d6df5d5c5c0f842ee9b27fe154a5370091ca3c8d
|
f2d494db4a736354e8f7705a0d94f4ba6104c911
|
/class_env/bin/pylint
|
e913b43385126dfe009821bcb89a1a525b427be1
|
[] |
no_license
|
Shankar1598/Training
|
6b40bde89bfd448429c4807a6e81a41c900c1ab1
|
3cc3bcbbd3fbb35118cd16e7e99ba532039e237a
|
refs/heads/master
| 2022-11-27T04:54:40.765334 | 2020-08-07T05:30:37 | 2020-08-07T05:30:37 | 285,745,792 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 250 |
#!/home/shankar/Desktop/Class/class_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_pylint())
|
[
"[email protected]"
] | ||
f4b33a1d107c661005411ee377782495662a53f5
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/appcontainers/azure-mgmt-appcontainers/azure/mgmt/appcontainers/operations/_container_apps_operations.py
|
57696edccfe1612b68eccd942bdcaf4fd0b1173b
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 |
MIT
| 2022-07-19T08:05:23 | 2018-11-16T22:15:30 |
Python
|
UTF-8
|
Python
| false | false | 62,915 |
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.App/containerApps")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_custom_host_name_analysis_request(
resource_group_name: str,
container_app_name: str,
subscription_id: str,
*,
custom_hostname: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listCustomHostNameAnalysis",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
if custom_hostname is not None:
_params["customHostname"] = _SERIALIZER.query("custom_hostname", custom_hostname, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_secrets_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listSecrets",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_auth_token_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/getAuthtoken",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class ContainerAppsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appcontainers.ContainerAppsAPIClient`'s
:attr:`container_apps` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> Iterable["_models.ContainerApp"]:
"""Get the Container Apps in a given subscription.
Get the Container Apps in a given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerApp or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ContainerAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.App/containerApps"}
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.ContainerApp"]:
"""Get the Container Apps in a given resource group.
Get the Container Apps in a given resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerApp or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ContainerAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps"
}
@distributed_trace
def get(self, resource_group_name: str, container_app_name: str, **kwargs: Any) -> _models.ContainerApp:
"""Get the properties of a Container App.
Get the properties of a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerApp or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.ContainerApp
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
404: lambda response: ResourceNotFoundError(response=response, error_format=ARMErrorFormat),
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
def _create_or_update_initial(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> _models.ContainerApp:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(container_app_envelope, (IO, bytes)):
_content = container_app_envelope
else:
_json = self._serialize.body(container_app_envelope, "ContainerApp")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@overload
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: _models.ContainerApp,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties used to create a container app. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties used to create a container app. Required.
:type container_app_envelope: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties used to create a container app. Is either a model
         type or an IO type. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
container_app_envelope=container_app_envelope,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@distributed_trace
def begin_delete(self, resource_group_name: str, container_app_name: str, **kwargs: Any) -> LROPoller[None]:
"""Delete a Container App.
Delete a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
container_app_name=container_app_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
def _update_initial(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> Optional[_models.ContainerApp]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.ContainerApp]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(container_app_envelope, (IO, bytes)):
_content = container_app_envelope
else:
_json = self._serialize.body(container_app_envelope, "ContainerApp")
request = build_update_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@overload
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: _models.ContainerApp,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties of a Container App that need to be updated. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties of a Container App that need to be updated. Required.
:type container_app_envelope: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties of a Container App that need to be updated. Is either
         a model type or an IO type. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
container_app_envelope=container_app_envelope,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
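    # Hedged usage sketch (not part of the generated client): a typical call is
    # assumed to look roughly like the following, where `client` is an
    # already-constructed management client and `updated_app` is a
    # models.ContainerApp carrying only the fields to patch.
    #
    #   poller = client.container_apps.begin_update(
    #       resource_group_name="my-rg",
    #       container_app_name="my-app",
    #       container_app_envelope=updated_app,
    #   )
    #   container_app = poller.result()  # blocks until the LRO completes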
@distributed_trace
def list_custom_host_name_analysis(
self, resource_group_name: str, container_app_name: str, custom_hostname: Optional[str] = None, **kwargs: Any
) -> _models.CustomHostnameAnalysisResult:
"""Analyzes a custom hostname for a Container App.
Analyzes a custom hostname for a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param custom_hostname: Custom hostname. Default value is None.
:type custom_hostname: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomHostnameAnalysisResult or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.CustomHostnameAnalysisResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.CustomHostnameAnalysisResult] = kwargs.pop("cls", None)
request = build_list_custom_host_name_analysis_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
custom_hostname=custom_hostname,
api_version=api_version,
template_url=self.list_custom_host_name_analysis.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("CustomHostnameAnalysisResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_custom_host_name_analysis.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listCustomHostNameAnalysis"
}
@distributed_trace
def list_secrets(
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> _models.SecretsCollection:
"""List secrets for a container app.
List secrets for a container app.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretsCollection or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.SecretsCollection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.SecretsCollection] = kwargs.pop("cls", None)
request = build_list_secrets_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_secrets.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("SecretsCollection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_secrets.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listSecrets"
}
@distributed_trace
def get_auth_token(
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> _models.ContainerAppAuthToken:
"""Get auth token for a container app.
Get auth token for a container app.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerAppAuthToken or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.ContainerAppAuthToken
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
404: lambda response: ResourceNotFoundError(response=response, error_format=ARMErrorFormat),
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppAuthToken] = kwargs.pop("cls", None)
request = build_get_auth_token_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_auth_token.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ContainerAppAuthToken", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_auth_token.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/getAuthtoken"
}
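# Hedged usage sketch (not part of the generated client): listing secrets and
# fetching an auth token through the package's public entry point. The client
# class name and credential type below are assumptions based on the usual
# azure-mgmt package layout; the helper is illustrative and never called here.
def _example_container_app_queries(subscription_id, resource_group, app_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.appcontainers import ContainerAppsAPIClient

    client = ContainerAppsAPIClient(DefaultAzureCredential(), subscription_id)
    secrets = client.container_apps.list_secrets(resource_group, app_name)
    token = client.container_apps.get_auth_token(resource_group, app_name)
    return secrets, token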
|
[
"[email protected]"
] | |
6d480975a561d743abbf7d61a83e83bc554fdf14
|
9438cb7f1dc8e237eab174ebcfb24694d6067146
|
/db_manager.py
|
efec3bed7ddc298e522bcfea2e5313a161fc85ab
|
[] |
no_license
|
BraweGit/Malware-scanner
|
84c440dfd269dc7d361571a732c86ec7fad98b3d
|
1dec369f2ac122ef593f13090e5041f07fec6939
|
refs/heads/master
| 2020-03-26T18:42:04.420290 | 2018-08-18T15:22:42 | 2018-08-18T15:22:42 | 145,225,942 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,286 |
py
|
import json
from pathlib import Path
from tinydb import TinyDB, Query
from tinydb import where
class DBManager:
""" Provides basic functionality for communication with local JSON database.
Creating database. Inserting new records. Updating existing records.
Or selecting record by its MD5 hash.
"""
def __init__(self):
self.Name = "mydb.json"
self.db = self.create_db()
self.db_check()
self.records = []
def get_malicious(self):
malicious = []
for record in self.records:
if record["response_code"] == 1 and record["positives"] > 0:
malicious.append(record)
return malicious
def reset_records(self):
self.records = []
def cache_records(self, records):
self.records.extend(records)
def cache_record(self, record):
if isinstance(record, list):
if not record[0] in self.records:
self.records.append(record[0])
else:
if not record in self.records:
self.records.append(record)
def db_check(self):
"""Check if db exists.
If not, create new db file.
"""
if not Path(self.Name).is_file():
self.create_db()
def create_db(self):
"""Creates new db .json file.
"""
try:
return TinyDB(self.Name)
except IOError:
print("Error! Unable to create database file!")
def add_records(self, to_insert_list):
for record in to_insert_list:
if record["response_code"] == 1:
self.add_record(record)
self.cache_record(record)
def add_record(self, toInsert):
""" If a record already exists update existing one.
If record doesn't exist, insert a new one.
Args:
toInsert(dictionary): JSON dictionary
"""
# Check if db exists, if not create a new one.
self.db_check()
# Check if record already exists.
# If not insert it.
# If yes, delete the old one and replace it with the new one.
try:
item = self.db.search(where("md5") == toInsert["md5"])
if not item:
self.db.insert(toInsert)
else:
self.db.remove(where("md5") == toInsert["md5"])
self.db.insert(toInsert)
except IOError:
print("Error! Unable communicate with the database!")
def get_not_analyzed(self, files_dict):
not_analyzed = []
for key, value in files_dict.items():
if not self.get_record(value):
not_analyzed.append(key)
return not_analyzed
def get_record(self, md5):
"""Gets a record from the DB.
Args:
md5(string): MD5 hash of the record.
Returns:
dictionary: JSON dictionary.
"""
# Check if db exists, if not create a new one.
self.db_check()
        try:
            item = self.db.search(where("md5") == md5)
        except IOError:
            print("Error! Unable to read from the database!")
            return None
if item:
return item
return None
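# Hedged usage sketch (not part of the original module): insert one made-up scan
# record and read it back by its MD5 hash. The field values are invented but
# match what add_record/get_record expect.
if __name__ == "__main__":
    manager = DBManager()
    record = {"md5": "d41d8cd98f00b204e9800998ecf8427e",
              "response_code": 1,
              "positives": 0}
    manager.add_record(record)
    print(manager.get_record(record["md5"]))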
|
[
"[email protected]"
] | |
07e129d0519352914bd0d2a4d1e1c026b289bc19
|
58e7a18c0dff5af960934e8f436a2021a88db39b
|
/src/useful/computeCoverage.py
|
25102709b110a974f4bbdce376dd268f840b21a7
|
[] |
no_license
|
nluhmann/EWRA
|
a8e09c1b38548bf04f0c0eabc970b34b26551337
|
2aa2b77b42d6653acf23a7d035de409758d1e479
|
refs/heads/master
| 2021-01-21T20:53:25.262195 | 2018-01-23T17:38:36 | 2018-01-23T17:38:36 | 94,756,469 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 738 |
py
|
#!/usr/bin/env python
import sys
markerfile = sys.argv[1]
families = sys.argv[2]
file = open(markerfile, "r")
markerset = []
for line in file:
if len(line) != 0:
if line[0] != "N":
array = line.replace('\n', '').split(' ')
markerset.extend(array)
file.close()
familyHash = {}
filef = open(families, "r")
for line in filef:
if line.startswith('>'):
id = line[1:].replace('\n', '')
elif len(line) > 1:
array = line.split(':')
pos = array[2].split('-')
length = abs(int(pos[1])-int(pos[0]))
familyHash[id] = length
filef.close()
lengthCounter = 0
for marker in markerset:
if marker != '':
length = familyHash[marker[1:]]
lengthCounter = lengthCounter + length
print "Total marker length: "+str(lengthCounter)
|
[
"[email protected]"
] | |
dea5643daac0785ee31f186a7cbdf1027b9724b6
|
fe04f5ceef3447f9e8c3ada52c1173808fdb9360
|
/inventory/dashboard/urls.py
|
ac361874ac2d28db9c9d1c71f4ab76022d85bd17
|
[] |
no_license
|
GuardME/inventory
|
289c6d2b059500aa181d0a66bf598d108d1facb0
|
b8cab5d1179b99e14068674aee7b87147bcb06cf
|
refs/heads/master
| 2023-08-23T22:30:24.183684 | 2021-10-25T09:49:55 | 2021-10-25T09:49:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 605 |
py
|
from django.urls import path
from dashboard import apps
from . import views
urlpatterns = [
path('dashboard', views.index, name='dashboard-index'),
path('staff/', views.staff, name="dashboard-staff"),
path('staff/detail/<int:pk>', views.staff_detail, name="dashboard-staff-detail"),
path('product/', views.product, name="dashboard-product"),
path('product/delete/<int:pk>/', views.delete_product, name="dashboard-product-delete"),
path('product/update/<int:pk>/', views.update_product, name="dashboard-product-edit"),
path('order/', views.order, name='dashboard-order'),
]
|
[
"[email protected]"
] | |
17f68468ed1bec0220733058ab02887e08228ee7
|
1e8c4293d02cd72297eb80aab776cc8dffb71690
|
/Data Types and Variables/10.Gladiator Expenses.py
|
2eee653802d2b6b8caa110d2fcbf652bf9b1e498
|
[] |
no_license
|
deyandyankov1/Fundamentals
|
cacdf8f4c9aeee02ffd5f91ba5494f6c4cdc504a
|
0152e8f307a44b7ee355a4020405e6e1e42ab1e6
|
refs/heads/main
| 2023-02-25T11:36:14.840005 | 2021-02-02T10:46:44 | 2021-02-02T10:46:44 | 330,765,629 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 697 |
py
|
lost_fights_count = int(input())
helmet_price = float(input())
sword_price = float(input())
shield_price = float(input())
armor_price = float(input())
sum_for_repair = 0
broken_shield_count = 0
for lost_fight in range(1, lost_fights_count + 1):
if lost_fight % 2 == 0:
sum_for_repair += helmet_price
if lost_fight % 3 == 0:
sum_for_repair += sword_price
if lost_fight % 2 == 0 and lost_fight % 3 == 0:
sum_for_repair += shield_price
broken_shield_count += 1
if broken_shield_count % 2 == 0 and not broken_shield_count == 0:
sum_for_repair += armor_price
print(f"Gladiator expenses: {sum_for_repair:.2f} aureus")
|
[
"[email protected]"
] | |
f913ae905247b8e0dff1b76960b7bc3560d658f1
|
f87c90c4721a0855980d51e8b65d8cfa5e37664b
|
/tools/releasetools/common.py
|
f4f67eaa9e27dedd3cdf2545a9ef809b505f5015
|
[
"Apache-2.0"
] |
permissive
|
Ankits-lab/build
|
493ab6fe9c025621e22475e85af320a3330216df
|
59c4a076a04708b36ffff0c81429bed3fb121370
|
refs/heads/main
| 2023-01-13T20:48:59.399800 | 2020-11-14T08:53:57 | 2020-11-14T08:53:57 | 312,779,885 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 124,331 |
py
|
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import collections
import copy
import errno
import fnmatch
import getopt
import getpass
import gzip
import imp
import json
import logging
import logging.config
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
from hashlib import sha1, sha256
import images
import sparse_img
from blockimgdiff import BlockImageDiff
logger = logging.getLogger(__name__)
class Options(object):
def __init__(self):
# Set up search path, in order to find framework/ and lib64/. At the time of
# running this function, user-supplied search path (`--path`) hasn't been
# available. So the value set here is the default, which might be overridden
# by commandline flag later.
exec_path = sys.argv[0]
if exec_path.endswith('.py'):
script_name = os.path.basename(exec_path)
# logger hasn't been initialized yet at this point. Use print to output
# warnings.
print(
'Warning: releasetools script should be invoked as hermetic Python '
'executable -- build and run `{}` directly.'.format(script_name[:-3]),
file=sys.stderr)
self.search_path = os.path.realpath(os.path.join(os.path.dirname(exec_path), '..'))
self.signapk_path = "framework/signapk.jar" # Relative to search_path
self.signapk_shared_library_path = "lib64" # Relative to search_path
self.extra_signapk_args = []
self.java_path = "java" # Use the one on the path by default.
self.java_args = ["-Xmx2048m"] # The default JVM args.
self.android_jar_path = None
self.public_key_suffix = ".x509.pem"
self.private_key_suffix = ".pk8"
# use otatools built boot_signer by default
self.boot_signer_path = "boot_signer"
self.boot_signer_args = []
self.verity_signer_path = None
self.verity_signer_args = []
self.aftl_server = None
self.aftl_key_path = None
self.aftl_manufacturer_key_path = None
self.aftl_signer_helper = None
self.verbose = False
self.tempfiles = []
self.device_specific = None
self.extras = {}
self.info_dict = None
self.source_info_dict = None
self.target_info_dict = None
self.worker_threads = None
# Stash size cannot exceed cache_size * threshold.
self.cache_size = None
self.stash_threshold = 0.8
self.logfile = None
OPTIONS = Options()
# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img.
AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'recovery', 'system',
'system_ext', 'vendor', 'vendor_boot')
# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')
# Partitions that should have their care_map added to META/care_map.pb
PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'system_ext', 'odm')
class ErrorCode(object):
"""Define error_codes for failures that happen during the actual
update package installation.
Error codes 0-999 are reserved for failures before the package
installation (i.e. low battery, package verification failure).
Detailed code in 'bootable/recovery/error_code.h' """
SYSTEM_VERIFICATION_FAILURE = 1000
SYSTEM_UPDATE_FAILURE = 1001
SYSTEM_UNEXPECTED_CONTENTS = 1002
SYSTEM_NONZERO_CONTENTS = 1003
SYSTEM_RECOVER_FAILURE = 1004
VENDOR_VERIFICATION_FAILURE = 2000
VENDOR_UPDATE_FAILURE = 2001
VENDOR_UNEXPECTED_CONTENTS = 2002
VENDOR_NONZERO_CONTENTS = 2003
VENDOR_RECOVER_FAILURE = 2004
OEM_PROP_MISMATCH = 3000
FINGERPRINT_MISMATCH = 3001
THUMBPRINT_MISMATCH = 3002
OLDER_BUILD = 3003
DEVICE_MISMATCH = 3004
BAD_PATCH_FILE = 3005
INSUFFICIENT_CACHE_SPACE = 3006
TUNE_PARTITION_FAILURE = 3007
APPLY_PATCH_FAILURE = 3008
class ExternalError(RuntimeError):
pass
def InitLogging():
DEFAULT_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format':
'%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'default': {
'class': 'logging.StreamHandler',
'formatter': 'standard',
'level': 'WARNING',
},
},
'loggers': {
'': {
'handlers': ['default'],
'propagate': True,
'level': 'INFO',
}
}
}
env_config = os.getenv('LOGGING_CONFIG')
if env_config:
with open(env_config) as f:
config = json.load(f)
else:
config = DEFAULT_LOGGING_CONFIG
# Increase the logging level for verbose mode.
if OPTIONS.verbose:
config = copy.deepcopy(config)
config['handlers']['default']['level'] = 'INFO'
if OPTIONS.logfile:
config = copy.deepcopy(config)
config['handlers']['logfile'] = {
'class': 'logging.FileHandler',
'formatter': 'standard',
'level': 'INFO',
'mode': 'w',
'filename': OPTIONS.logfile,
}
config['loggers']['']['handlers'].append('logfile')
logging.config.dictConfig(config)
def Run(args, verbose=None, **kwargs):
"""Creates and returns a subprocess.Popen object.
Args:
args: The command represented as a list of strings.
verbose: Whether the commands should be shown. Default to the global
verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them.
universal_newlines will default to True, as most of the users in
releasetools expect string output.
Returns:
A subprocess.Popen object.
"""
if 'stdout' not in kwargs and 'stderr' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
if 'universal_newlines' not in kwargs:
kwargs['universal_newlines'] = True
# Don't log any if caller explicitly says so.
if verbose != False:
logger.info(" Running: \"%s\"", " ".join(args))
return subprocess.Popen(args, **kwargs)
def RunAndWait(args, verbose=None, **kwargs):
"""Runs the given command waiting for it to complete.
Args:
args: The command represented as a list of strings.
verbose: Whether the commands should be shown. Default to the global
verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them.
Raises:
ExternalError: On non-zero exit from the command.
"""
proc = Run(args, verbose=verbose, **kwargs)
proc.wait()
if proc.returncode != 0:
raise ExternalError(
"Failed to run command '{}' (exit code {})".format(
args, proc.returncode))
def RunAndCheckOutput(args, verbose=None, **kwargs):
"""Runs the given command and returns the output.
Args:
args: The command represented as a list of strings.
verbose: Whether the commands should be shown. Default to the global
verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them.
Returns:
The output string.
Raises:
ExternalError: On non-zero exit from the command.
"""
proc = Run(args, verbose=verbose, **kwargs)
output, _ = proc.communicate()
if output is None:
output = ""
# Don't log any if caller explicitly says so.
if verbose != False:
logger.info("%s", output.rstrip())
if proc.returncode != 0:
raise ExternalError(
"Failed to run command '{}' (exit code {}):\n{}".format(
args, proc.returncode, output))
return output
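# Hedged usage sketch (not part of the original module): host tools are usually
# wrapped with RunAndCheckOutput; the command below is only a stand-in and the
# helper is never called from this file.
def _ExampleRunAndCheckOutput():
  """Returns the stripped output of a trivially succeeding host command."""
  return RunAndCheckOutput(["echo", "hello"], verbose=False).strip()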
def RoundUpTo4K(value):
rounded_up = value + 4095
return rounded_up - (rounded_up % 4096)
def CloseInheritedPipes():
""" Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
before doing other work."""
if platform.system() != "Darwin":
return
for d in range(3, 1025):
try:
stat = os.fstat(d)
if stat is not None:
pipebit = stat[0] & 0x1000
if pipebit != 0:
os.close(d)
except OSError:
pass
class BuildInfo(object):
"""A class that holds the information for a given build.
This class wraps up the property querying for a given source or target build.
It abstracts away the logic of handling OEM-specific properties, and caches
the commonly used properties such as fingerprint.
There are two types of info dicts: a) build-time info dict, which is generated
at build time (i.e. included in a target_files zip); b) OEM info dict that is
specified at package generation time (via command line argument
'--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not
having "oem_fingerprint_properties" in build-time info dict), all the queries
would be answered based on build-time info dict only. Otherwise if using
OEM-specific properties, some of them will be calculated from two info dicts.
Users can query properties similarly as using a dict() (e.g. info['fstab']),
or to query build properties via GetBuildProp() or GetPartitionBuildProp().
Attributes:
info_dict: The build-time info dict.
is_ab: Whether it's a build that uses A/B OTA.
oem_dicts: A list of OEM dicts.
oem_props: A list of OEM properties that should be read from OEM dicts; None
if the build doesn't use any OEM-specific property.
fingerprint: The fingerprint of the build, which would be calculated based
on OEM properties if applicable.
device: The device name, which could come from OEM dicts if applicable.
"""
_RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
"ro.product.manufacturer", "ro.product.model",
"ro.product.name"]
_RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
"product", "odm", "vendor", "system_ext", "system"]
_RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
"product", "product_services", "odm", "vendor", "system"]
_RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []
def __init__(self, info_dict, oem_dicts=None):
"""Initializes a BuildInfo instance with the given dicts.
Note that it only wraps up the given dicts, without making copies.
Arguments:
info_dict: The build-time info dict.
oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
that it always uses the first dict to calculate the fingerprint or the
device name. The rest would be used for asserting OEM properties only
(e.g. one package can be installed on one of these devices).
Raises:
ValueError: On invalid inputs.
"""
self.info_dict = info_dict
self.oem_dicts = oem_dicts
self._is_ab = info_dict.get("ab_update") == "true"
# Skip _oem_props if oem_dicts is None to use BuildInfo in
# sign_target_files_apks
if self.oem_dicts:
self._oem_props = info_dict.get("oem_fingerprint_properties")
else:
self._oem_props = None
def check_fingerprint(fingerprint):
if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
raise ValueError(
'Invalid build fingerprint: "{}". See the requirement in Android CDD '
"3.2.2. Build Parameters.".format(fingerprint))
self._partition_fingerprints = {}
for partition in PARTITIONS_WITH_CARE_MAP:
try:
fingerprint = self.CalculatePartitionFingerprint(partition)
check_fingerprint(fingerprint)
self._partition_fingerprints[partition] = fingerprint
except ExternalError:
continue
if "system" in self._partition_fingerprints:
# system_other is not included in PARTITIONS_WITH_CARE_MAP, but does
# need a fingerprint when creating the image.
self._partition_fingerprints[
"system_other"] = self._partition_fingerprints["system"]
# These two should be computed only after setting self._oem_props.
self._device = info_dict.get("ota_override_device", self.GetOemProperty("ro.product.device"))
self._fingerprint = self.CalculateFingerprint()
check_fingerprint(self._fingerprint)
@property
def is_ab(self):
return self._is_ab
@property
def device(self):
return self._device
@property
def fingerprint(self):
return self._fingerprint
@property
def oem_props(self):
return self._oem_props
def __getitem__(self, key):
return self.info_dict[key]
def __setitem__(self, key, value):
self.info_dict[key] = value
def get(self, key, default=None):
return self.info_dict.get(key, default)
def items(self):
return self.info_dict.items()
def _GetRawBuildProp(self, prop, partition):
prop_file = '{}.build.prop'.format(
partition) if partition else 'build.prop'
partition_props = self.info_dict.get(prop_file)
if not partition_props:
return None
return partition_props.GetProp(prop)
def GetPartitionBuildProp(self, prop, partition):
"""Returns the inquired build property for the provided partition."""
# If provided a partition for this property, only look within that
# partition's build.prop.
if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
prop = prop.replace("ro.product", "ro.product.{}".format(partition))
else:
prop = prop.replace("ro.", "ro.{}.".format(partition))
prop_val = self._GetRawBuildProp(prop, partition)
if prop_val is not None:
return prop_val
raise ExternalError("couldn't find %s in %s.build.prop" %
(prop, partition))
def GetBuildProp(self, prop):
"""Returns the inquired build property from the standard build.prop file."""
if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
return self._ResolveRoProductBuildProp(prop)
prop_val = self._GetRawBuildProp(prop, None)
if prop_val is not None:
return prop_val
raise ExternalError("couldn't find %s in build.prop" % (prop,))
def _ResolveRoProductBuildProp(self, prop):
"""Resolves the inquired ro.product.* build property"""
prop_val = self._GetRawBuildProp(prop, None)
if prop_val:
return prop_val
default_source_order = self._GetRoProductPropsDefaultSourceOrder()
source_order_val = self._GetRawBuildProp(
"ro.product.property_source_order", None)
if source_order_val:
source_order = source_order_val.split(",")
else:
source_order = default_source_order
# Check that all sources in ro.product.property_source_order are valid
if any([x not in default_source_order for x in source_order]):
raise ExternalError(
"Invalid ro.product.property_source_order '{}'".format(source_order))
for source_partition in source_order:
source_prop = prop.replace(
"ro.product", "ro.product.{}".format(source_partition), 1)
prop_val = self._GetRawBuildProp(source_prop, source_partition)
if prop_val:
return prop_val
raise ExternalError("couldn't resolve {}".format(prop))
def _GetRoProductPropsDefaultSourceOrder(self):
# NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
# values of these properties for each Android release.
android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
if android_codename == "REL":
android_version = self._GetRawBuildProp("ro.build.version.release", None)
if android_version == "10":
return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
# NOTE: float() conversion of android_version will have rounding error.
# We are checking for "9" or less, and using "< 10" is well outside of
# possible floating point rounding.
try:
android_version_val = float(android_version)
except ValueError:
android_version_val = 0
if android_version_val < 10:
return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT
def GetOemProperty(self, key):
if self.oem_props is not None and key in self.oem_props:
return self.oem_dicts[0][key]
return self.GetBuildProp(key)
def GetPartitionFingerprint(self, partition):
return self._partition_fingerprints.get(partition, None)
def CalculatePartitionFingerprint(self, partition):
try:
return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
except ExternalError:
return "{}/{}/{}:{}/{}/{}:{}/{}".format(
self.GetPartitionBuildProp("ro.product.brand", partition),
self.GetPartitionBuildProp("ro.product.name", partition),
self.GetPartitionBuildProp("ro.product.device", partition),
self.GetPartitionBuildProp("ro.build.version.release", partition),
self.GetPartitionBuildProp("ro.build.id", partition),
self.GetPartitionBuildProp("ro.build.version.incremental", partition),
self.GetPartitionBuildProp("ro.build.type", partition),
self.GetPartitionBuildProp("ro.build.tags", partition))
def CalculateFingerprint(self):
if self.oem_props is None:
try:
return self.GetBuildProp("ro.build.fingerprint")
except ExternalError:
return "{}/{}/{}:{}/{}/{}:{}/{}".format(
self.GetBuildProp("ro.product.brand"),
self.GetBuildProp("ro.product.name"),
self.GetBuildProp("ro.product.device"),
self.GetBuildProp("ro.build.version.release"),
self.GetBuildProp("ro.build.id"),
self.GetBuildProp("ro.build.version.incremental"),
self.GetBuildProp("ro.build.type"),
self.GetBuildProp("ro.build.tags"))
return "%s/%s/%s:%s" % (
self.GetOemProperty("ro.product.brand"),
self.GetOemProperty("ro.product.name"),
self.GetOemProperty("ro.product.device"),
self.GetBuildProp("ro.build.thumbprint"))
def WriteMountOemScript(self, script):
assert self.oem_props is not None
recovery_mount_options = self.info_dict.get("recovery_mount_options")
script.Mount("/oem", recovery_mount_options)
def WriteDeviceAssertions(self, script, oem_no_mount):
# Read the property directly if not using OEM properties.
if not self.oem_props:
script.AssertDevice(self.device)
return
# Otherwise assert OEM properties.
if not self.oem_dicts:
raise ExternalError(
"No OEM file provided to answer expected assertions")
for prop in self.oem_props.split():
values = []
for oem_dict in self.oem_dicts:
if prop in oem_dict:
values.append(oem_dict[prop])
if not values:
raise ExternalError(
"The OEM file is missing the property %s" % (prop,))
script.AssertOemProperty(prop, values, oem_no_mount)
def ReadFromInputFile(input_file, fn):
"""Reads the contents of fn from input zipfile or directory."""
if isinstance(input_file, zipfile.ZipFile):
return input_file.read(fn).decode()
else:
path = os.path.join(input_file, *fn.split("/"))
try:
with open(path) as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise KeyError(fn)
def LoadInfoDict(input_file, repacking=False):
"""Loads the key/value pairs from the given input target_files.
It reads `META/misc_info.txt` file in the target_files input, does sanity
  checks and returns the parsed key/value pairs for the given build. It's
usually called early when working on input target_files files, e.g. when
generating OTAs, or signing builds. Note that the function may be called
against an old target_files file (i.e. from past dessert releases). So the
property parsing needs to be backward compatible.
In a `META/misc_info.txt`, a few properties are stored as links to the files
in the PRODUCT_OUT directory. It works fine with the build system. However,
they are no longer available when (re)generating images from target_files zip.
When `repacking` is True, redirect these properties to the actual files in the
unzipped directory.
Args:
input_file: The input target_files file, which could be an open
zipfile.ZipFile instance, or a str for the dir that contains the files
unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after loading the
info dict (default: False). If so, it will rewrite a few loaded
properties (e.g. selinux_fc, root_dir) to point to the actual files in
target_files file. When doing repacking, `input_file` must be a dir.
Returns:
A dict that contains the parsed key/value pairs.
Raises:
AssertionError: On invalid input arguments.
ValueError: On malformed input values.
"""
if repacking:
assert isinstance(input_file, str), \
"input_file must be a path str when doing repacking"
def read_helper(fn):
return ReadFromInputFile(input_file, fn)
try:
d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
except KeyError:
raise ValueError("Failed to find META/misc_info.txt in input target-files")
if "recovery_api_version" not in d:
raise ValueError("Failed to find 'recovery_api_version'")
if "fstab_version" not in d:
raise ValueError("Failed to find 'fstab_version'")
if repacking:
# "selinux_fc" properties should point to the file_contexts files
# (file_contexts.bin) under META/.
for key in d:
if key.endswith("selinux_fc"):
fc_basename = os.path.basename(d[key])
fc_config = os.path.join(input_file, "META", fc_basename)
assert os.path.exists(fc_config)
d[key] = fc_config
# Similarly we need to redirect "root_dir", and "root_fs_config".
d["root_dir"] = os.path.join(input_file, "ROOT")
d["root_fs_config"] = os.path.join(
input_file, "META", "root_filesystem_config.txt")
# Redirect {partition}_base_fs_file for each of the named partitions.
for part_name in ["system", "vendor", "system_ext", "product", "odm"]:
key_name = part_name + "_base_fs_file"
if key_name not in d:
continue
basename = os.path.basename(d[key_name])
base_fs_file = os.path.join(input_file, "META", basename)
if os.path.exists(base_fs_file):
d[key_name] = base_fs_file
else:
logger.warning(
"Failed to find %s base fs file: %s", part_name, base_fs_file)
del d[key_name]
def makeint(key):
if key in d:
d[key] = int(d[key], 0)
makeint("recovery_api_version")
makeint("blocksize")
makeint("system_size")
makeint("vendor_size")
makeint("userdata_size")
makeint("cache_size")
makeint("recovery_size")
makeint("fstab_version")
boot_images = "boot.img"
if "boot_images" in d:
boot_images = d["boot_images"]
for b in boot_images.split():
makeint(b.replace(".img","_size"))
# Load recovery fstab if applicable.
d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
# Tries to load the build props for all partitions with care_map, including
# system and vendor.
for partition in PARTITIONS_WITH_CARE_MAP:
partition_prop = "{}.build.prop".format(partition)
d[partition_prop] = PartitionBuildProps.FromInputFile(
input_file, partition)
d["build.prop"] = d["system.build.prop"]
# Set up the salt (based on fingerprint) that will be used when adding AVB
# hash / hashtree footers.
if d.get("avb_enable") == "true":
build_info = BuildInfo(d)
for partition in PARTITIONS_WITH_CARE_MAP:
fingerprint = build_info.GetPartitionFingerprint(partition)
if fingerprint:
d["avb_{}_salt".format(partition)] = sha256(fingerprint).hexdigest()
return d
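# Hedged usage sketch (not part of the original module): OTA scripts typically
# load the info dict from a target_files zip and wrap it in a BuildInfo to get
# the build fingerprint; the zip path is a placeholder and the helper is never
# called from this file.
def _ExampleLoadTargetFilesInfo(target_files_zip_path):
  with zipfile.ZipFile(target_files_zip_path) as input_zip:
    info_dict = LoadInfoDict(input_zip)
  return BuildInfo(info_dict).fingerprint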
def LoadListFromFile(file_path):
with open(file_path) as f:
return f.read().splitlines()
def LoadDictionaryFromFile(file_path):
lines = LoadListFromFile(file_path)
return LoadDictionaryFromLines(lines)
def LoadDictionaryFromLines(lines):
d = {}
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if "=" in line:
name, value = line.split("=", 1)
d[name] = value
return d
class PartitionBuildProps(object):
"""The class holds the build prop of a particular partition.
This class loads the build.prop and holds the build properties for a given
partition. It also partially recognizes the 'import' statement in the
build.prop; and calculates alternative values of some specific build
properties during runtime.
Attributes:
input_file: a zipped target-file or an unzipped target-file directory.
partition: name of the partition.
props_allow_override: a list of build properties to search for the
alternative values during runtime.
build_props: a dict of build properties for the given partition.
prop_overrides: a set of props that are overridden by import.
placeholder_values: A dict of runtime variables' values to replace the
placeholders in the build.prop file. We expect exactly one value for
each of the variables.
"""
def __init__(self, input_file, name, placeholder_values=None):
self.input_file = input_file
self.partition = name
self.props_allow_override = [props.format(name) for props in [
'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
self.build_props = {}
self.prop_overrides = set()
self.placeholder_values = {}
if placeholder_values:
self.placeholder_values = copy.deepcopy(placeholder_values)
@staticmethod
def FromDictionary(name, build_props):
"""Constructs an instance from a build prop dictionary."""
props = PartitionBuildProps("unknown", name)
props.build_props = build_props.copy()
return props
@staticmethod
def FromInputFile(input_file, name, placeholder_values=None):
"""Loads the build.prop file and builds the attributes."""
data = ''
for prop_file in ['{}/etc/build.prop'.format(name.upper()),
'{}/build.prop'.format(name.upper())]:
try:
data = ReadFromInputFile(input_file, prop_file)
break
except KeyError:
logger.warning('Failed to read %s', prop_file)
props = PartitionBuildProps(input_file, name, placeholder_values)
props._LoadBuildProp(data)
return props
def _LoadBuildProp(self, data):
for line in data.split('\n'):
line = line.strip()
if not line or line.startswith("#"):
continue
if line.startswith("import"):
overrides = self._ImportParser(line)
duplicates = self.prop_overrides.intersection(overrides.keys())
if duplicates:
raise ValueError('prop {} is overridden multiple times'.format(
','.join(duplicates)))
self.prop_overrides = self.prop_overrides.union(overrides.keys())
self.build_props.update(overrides)
elif "=" in line:
name, value = line.split("=", 1)
if name in self.prop_overrides:
raise ValueError('prop {} is set again after overridden by import '
'statement'.format(name))
self.build_props[name] = value
def _ImportParser(self, line):
"""Parses the build prop in a given import statement."""
tokens = line.split()
    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
raise ValueError('Unrecognized import statement {}'.format(line))
if len(tokens) == 3:
logger.info("Import %s from %s, skip", tokens[2], tokens[1])
return {}
import_path = tokens[1]
if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
raise ValueError('Unrecognized import path {}'.format(line))
    # We only recognize a subset of the import statements that the init process
    # supports, and we can loosen the restriction based on how the dynamic
# fingerprint is used in practice. The placeholder format should be
# ${placeholder}, and its value should be provided by the caller through
# the placeholder_values.
for prop, value in self.placeholder_values.items():
prop_place_holder = '${{{}}}'.format(prop)
if prop_place_holder in import_path:
import_path = import_path.replace(prop_place_holder, value)
if '$' in import_path:
logger.info('Unresolved place holder in import path %s', import_path)
return {}
import_path = import_path.replace('/{}'.format(self.partition),
self.partition.upper())
logger.info('Parsing build props override from %s', import_path)
lines = ReadFromInputFile(self.input_file, import_path).split('\n')
d = LoadDictionaryFromLines(lines)
return {key: val for key, val in d.items()
if key in self.props_allow_override}
def GetProp(self, prop):
return self.build_props.get(prop)
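# Hedged usage sketch (not part of the original module): FromDictionary builds a
# props object without any target_files input; the property values are made up
# and the helper is never called from this file.
def _ExamplePartitionBuildProps():
  props = PartitionBuildProps.FromDictionary(
      "system", {"ro.product.system.brand": "aosp",
                 "ro.system.build.id": "EXAMPLE"})
  return props.GetProp("ro.product.system.brand")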
def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
system_root_image=False):
class Partition(object):
def __init__(self, mount_point, fs_type, device, length, context, slotselect):
self.mount_point = mount_point
self.fs_type = fs_type
self.device = device
self.length = length
self.context = context
self.slotselect = slotselect
try:
data = read_helper(recovery_fstab_path)
except KeyError:
logger.warning("Failed to find %s", recovery_fstab_path)
data = ""
assert fstab_version == 2
d = {}
for line in data.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
# <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
pieces = line.split()
if len(pieces) != 5:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
# Ignore entries that are managed by vold.
options = pieces[4]
if "voldmanaged=" in options:
continue
# It's a good line, parse it.
length = 0
slotselect = False
options = options.split(",")
for i in options:
if i.startswith("length="):
length = int(i[7:])
elif i == "slotselect":
slotselect = True
else:
# Ignore all unknown options in the unified fstab.
continue
mount_flags = pieces[3]
# Honor the SELinux context if present.
context = None
for i in mount_flags.split(","):
if i.startswith("context="):
context = i
mount_point = pieces[1]
if not d.get(mount_point):
d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
device=pieces[0], length=length, context=context,
slotselect=slotselect)
# / is used for the system mount point when the root directory is included in
# system. Other areas assume system is always at "/system" so point /system
# at /.
if system_root_image:
assert '/system' not in d and '/' in d
d["/system"] = d["/"]
return d
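# Hedged usage sketch (not part of the original module): parsing a single v2
# fstab line through LoadRecoveryFSTab; the block device path is invented and
# the helper is never called from this file.
def _ExampleLoadRecoveryFSTab():
  sample = "/dev/block/by-name/system /system ext4 ro wait,slotselect"
  fstab = LoadRecoveryFSTab(lambda _: sample, 2, "unused/recovery.fstab")
  return fstab["/system"].slotselect  # True, because of the slotselect flag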
def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
"""Finds the path to recovery fstab and loads its contents."""
# recovery fstab is only meaningful when installing an update via recovery
# (i.e. non-A/B OTA). Skip loading fstab if device used A/B OTA.
if info_dict.get('ab_update') == 'true' and \
info_dict.get("allow_non_ab") != "true":
return None
# We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to
# ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
# cases, since it may load the info_dict from an old build (e.g. when
# generating incremental OTAs from that build).
system_root_image = info_dict.get('system_root_image') == 'true'
if info_dict.get('no_recovery') != 'true':
recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
if isinstance(input_file, zipfile.ZipFile):
if recovery_fstab_path not in input_file.namelist():
recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
else:
path = os.path.join(input_file, *recovery_fstab_path.split('/'))
if not os.path.exists(path):
recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
return LoadRecoveryFSTab(
read_helper, info_dict['fstab_version'], recovery_fstab_path,
system_root_image)
if info_dict.get('recovery_as_boot') == 'true':
recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
if isinstance(input_file, zipfile.ZipFile):
if recovery_fstab_path not in input_file.namelist():
recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
else:
path = os.path.join(input_file, *recovery_fstab_path.split('/'))
if not os.path.exists(path):
recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
return LoadRecoveryFSTab(
read_helper, info_dict['fstab_version'], recovery_fstab_path,
system_root_image)
return None
def DumpInfoDict(d):
for k, v in sorted(d.items()):
logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
"""Merges dynamic partition info variables.
Args:
framework_dict: The dictionary of dynamic partition info variables from the
partial framework target files.
vendor_dict: The dictionary of dynamic partition info variables from the
partial vendor target files.
Returns:
The merged dynamic partition info dictionary.
"""
merged_dict = {}
# Partition groups and group sizes are defined by the vendor dict because
# these values may vary for each board that uses a shared system image.
merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
framework_dynamic_partition_list = framework_dict.get(
"dynamic_partition_list", "")
vendor_dynamic_partition_list = vendor_dict.get("dynamic_partition_list", "")
merged_dict["dynamic_partition_list"] = ("%s %s" % (
framework_dynamic_partition_list, vendor_dynamic_partition_list)).strip()
for partition_group in merged_dict["super_partition_groups"].split(" "):
# Set the partition group's size using the value from the vendor dict.
key = "super_%s_group_size" % partition_group
if key not in vendor_dict:
raise ValueError("Vendor dict does not contain required key %s." % key)
merged_dict[key] = vendor_dict[key]
# Set the partition group's partition list using a concatenation of the
# framework and vendor partition lists.
key = "super_%s_partition_list" % partition_group
merged_dict[key] = (
"%s %s" %
(framework_dict.get(key, ""), vendor_dict.get(key, ""))).strip()
# Pick virtual ab related flags from vendor dict, if defined.
if "virtual_ab" in vendor_dict.keys():
merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
if "virtual_ab_retrofit" in vendor_dict.keys():
merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
return merged_dict
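# Hedged usage sketch (not part of the original module): merging minimal framework
# and vendor dynamic-partition dicts; the group name and size are invented and the
# helper is never called from this file.
def _ExampleMergeDynamicPartitionInfoDicts():
  framework = {"dynamic_partition_list": "system",
               "super_group_foo_partition_list": "system"}
  vendor = {"super_partition_groups": "group_foo",
            "super_group_foo_group_size": "4294967296",
            "dynamic_partition_list": "vendor",
            "super_group_foo_partition_list": "vendor"}
  return MergeDynamicPartitionInfoDicts(framework, vendor)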
def AppendAVBSigningArgs(cmd, partition):
"""Append signing arguments for avbtool."""
# e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
if key_path and not os.path.exists(key_path) and OPTIONS.search_path:
new_key_path = os.path.join(OPTIONS.search_path, key_path)
if os.path.exists(new_key_path):
key_path = new_key_path
algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
if key_path and algorithm:
cmd.extend(["--key", key_path, "--algorithm", algorithm])
avb_salt = OPTIONS.info_dict.get("avb_salt")
# make_vbmeta_image doesn't like "--salt" (and it's not needed).
if avb_salt and not partition.startswith("vbmeta"):
cmd.extend(["--salt", avb_salt])
def GetAvbPartitionArg(partition, image, info_dict=None):
"""Returns the VBMeta arguments for partition.
It sets up the VBMeta argument by including the partition descriptor from the
given 'image', or by configuring the partition as a chained partition.
Args:
partition: The name of the partition (e.g. "system").
image: The path to the partition image.
info_dict: A dict returned by common.LoadInfoDict(). Will use
OPTIONS.info_dict if None has been given.
Returns:
A list of VBMeta arguments.
"""
if info_dict is None:
info_dict = OPTIONS.info_dict
# Check if chain partition is used.
key_path = info_dict.get("avb_" + partition + "_key_path")
if not key_path:
return ["--include_descriptors_from_image", image]
# For a non-A/B device, we don't chain /recovery nor include its descriptor
# into vbmeta.img. The recovery image will be configured on an independent
# boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
# See details at
# https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
if info_dict.get("ab_update") != "true" and partition == "recovery":
return []
# Otherwise chain the partition into vbmeta.
chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
return ["--chain_partition", chained_partition_arg]
def GetAvbChainedPartitionArg(partition, info_dict, key=None):
"""Constructs and returns the arg to build or verify a chained partition.
Args:
partition: The partition name.
info_dict: The info dict to look up the key info and rollback index
location.
key: The key to be used for building or verifying the partition. Defaults to
the key listed in info_dict.
Returns:
A string of form "partition:rollback_index_location:key" that can be used to
build or verify vbmeta image.
"""
if key is None:
key = info_dict["avb_" + partition + "_key_path"]
if key and not os.path.exists(key) and OPTIONS.search_path:
new_key_path = os.path.join(OPTIONS.search_path, key)
if os.path.exists(new_key_path):
key = new_key_path
pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
rollback_index_location = info_dict[
"avb_" + partition + "_rollback_index_location"]
return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
def BuildVBMeta(image_path, partitions, name, needed_partitions):
"""Creates a VBMeta image.
It generates the requested VBMeta image. The requested image could be for
top-level or chained VBMeta image, which is determined based on the name.
Args:
image_path: The output path for the new VBMeta image.
partitions: A dict that's keyed by partition names with image paths as
values. Only valid partition names are accepted, as partitions listed
in common.AVB_PARTITIONS and custom partitions listed in
OPTIONS.info_dict.get("avb_custom_images_partition_list")
name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
needed_partitions: Partitions whose descriptors should be included into the
generated VBMeta image.
Raises:
AssertionError: On invalid input args.
"""
avbtool = OPTIONS.info_dict["avb_avbtool"]
cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
AppendAVBSigningArgs(cmd, name)
custom_partitions = OPTIONS.info_dict.get(
"avb_custom_images_partition_list", "").strip().split()
for partition, path in partitions.items():
if partition not in needed_partitions:
continue
assert (partition in AVB_PARTITIONS or
partition in AVB_VBMETA_PARTITIONS or
partition in custom_partitions), \
'Unknown partition: {}'.format(partition)
assert os.path.exists(path), \
'Failed to find {} for {}'.format(path, partition)
cmd.extend(GetAvbPartitionArg(partition, path))
args = OPTIONS.info_dict.get("avb_{}_args".format(name))
if args and args.strip():
split_args = shlex.split(args)
for index, arg in enumerate(split_args[:-1]):
# Sanity check that the image file exists. Some images might be defined
# as a path relative to source tree, which may not be available at the
# same location when running this script (we have the input target_files
# zip only). For such cases, we additionally scan other locations (e.g.
# IMAGES/, RADIO/, etc) before bailing out.
if arg == '--include_descriptors_from_image':
image_path = split_args[index + 1]
if os.path.exists(image_path):
continue
found = False
for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
alt_path = os.path.join(
OPTIONS.input_tmp, dir_name, os.path.basename(image_path))
if os.path.exists(alt_path):
split_args[index + 1] = alt_path
found = True
break
assert found, 'Failed to find {}'.format(image_path)
cmd.extend(split_args)
RunAndCheckOutput(cmd)
if OPTIONS.aftl_server is not None:
# Ensure the other AFTL parameters are set as well.
assert OPTIONS.aftl_key_path is not None, 'No AFTL key provided.'
assert OPTIONS.aftl_manufacturer_key_path is not None, 'No AFTL manufacturer key provided.'
assert OPTIONS.aftl_signer_helper is not None, 'No AFTL signer helper provided.'
# AFTL inclusion proof generation code will go here.
def _MakeRamdisk(sourcedir, fs_config_file=None, lz4_ramdisks=False, xz_ramdisks=False):
ramdisk_img = tempfile.NamedTemporaryFile()
if fs_config_file is not None and os.access(fs_config_file, os.F_OK):
cmd = ["mkbootfs", "-f", fs_config_file,
os.path.join(sourcedir, "RAMDISK")]
else:
cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
p1 = Run(cmd, stdout=subprocess.PIPE)
if lz4_ramdisks:
p2 = Run(["lz4", "-l", "-12" , "--favor-decSpeed"], stdin=p1.stdout,
stdout=ramdisk_img.file.fileno())
elif xz_ramdisks:
p2 = Run(["xz", "-f", "-c", "--check=crc32", "--lzma2=dict=32MiB"], stdin=p1.stdout,
stdout=ramdisk_img.file.fileno())
else:
p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
p2.wait()
p1.wait()
assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)
return ramdisk_img
def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
has_ramdisk=False, two_step_image=False):
"""Build a bootable image from the specified sourcedir.
Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
we are building a two-step special image (i.e. building a recovery image to
be loaded into /boot in two-step OTAs).
  Return the image data, or None if sourcedir does not appear to contain files
for building the requested image.
"""
# "boot" or "recovery", without extension.
partition_name = os.path.basename(sourcedir).lower()
if partition_name == "recovery":
kernel = "kernel"
else:
kernel = image_name.replace("boot", "kernel")
kernel = kernel.replace(".img","")
if not os.access(os.path.join(sourcedir, kernel), os.F_OK):
return None
if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
return None
if info_dict is None:
info_dict = OPTIONS.info_dict
img = tempfile.NamedTemporaryFile()
if has_ramdisk:
use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
use_xz = info_dict.get("xz_ramdisks") == 'true'
ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, lz4_ramdisks=use_lz4, xz_ramdisks=use_xz)
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, kernel)]
fn = os.path.join(sourcedir, "second")
if os.access(fn, os.F_OK):
cmd.append("--second")
cmd.append(fn)
fn = os.path.join(sourcedir, "dtb")
if os.access(fn, os.F_OK):
cmd.append("--dtb")
cmd.append(fn)
fn = os.path.join(sourcedir, "cmdline")
if os.access(fn, os.F_OK):
cmd.append("--cmdline")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "base")
if os.access(fn, os.F_OK):
cmd.append("--base")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "pagesize")
if os.access(fn, os.F_OK):
cmd.append("--pagesize")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "dt")
if os.access(fn, os.F_OK):
cmd.append("--dt")
cmd.append(fn)
if partition_name == "recovery":
args = info_dict.get("recovery_mkbootimg_args")
if not args:
# Fall back to "mkbootimg_args" for recovery image
# in case "recovery_mkbootimg_args" is not set.
args = info_dict.get("mkbootimg_args")
else:
args = info_dict.get("mkbootimg_args")
if args and args.strip():
cmd.extend(shlex.split(args))
args = info_dict.get("mkbootimg_version_args")
if args and args.strip():
cmd.extend(shlex.split(args))
if has_ramdisk:
cmd.extend(["--ramdisk", ramdisk_img.name])
img_unsigned = None
if info_dict.get("vboot"):
img_unsigned = tempfile.NamedTemporaryFile()
cmd.extend(["--output", img_unsigned.name])
else:
cmd.extend(["--output", img.name])
if partition_name == "recovery":
if info_dict.get("include_recovery_dtbo") == "true":
fn = os.path.join(sourcedir, "recovery_dtbo")
cmd.extend(["--recovery_dtbo", fn])
if info_dict.get("include_recovery_acpio") == "true":
fn = os.path.join(sourcedir, "recovery_acpio")
cmd.extend(["--recovery_acpio", fn])
RunAndCheckOutput(cmd)
if (info_dict.get("boot_signer") == "true" and
info_dict.get("verity_key")):
# Hard-code the path as "/boot" for two-step special recovery image (which
# will be loaded into /boot during the two-step OTA).
if two_step_image:
path = "/boot"
else:
path = "/" + partition_name
cmd = [OPTIONS.boot_signer_path]
cmd.extend(OPTIONS.boot_signer_args)
cmd.extend([path, img.name,
info_dict["verity_key"] + ".pk8",
info_dict["verity_key"] + ".x509.pem", img.name])
RunAndCheckOutput(cmd)
# Sign the image if vboot is non-empty.
elif info_dict.get("vboot"):
path = "/" + partition_name
img_keyblock = tempfile.NamedTemporaryFile()
# We have switched from the prebuilt futility binary to using the tool
# (futility-host) built from the source. Override the setting in the old
# TF.zip.
futility = info_dict["futility"]
if futility.startswith("prebuilts/"):
futility = "futility-host"
cmd = [info_dict["vboot_signer_cmd"], futility,
img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
info_dict["vboot_key"] + ".vbprivk",
info_dict["vboot_subkey"] + ".vbprivk",
img_keyblock.name,
img.name]
RunAndCheckOutput(cmd)
# Clean up the temp files.
img_unsigned.close()
img_keyblock.close()
# AVB: if enabled, calculate and add hash to boot.img or recovery.img.
if info_dict.get("avb_enable") == "true":
avbtool = info_dict["avb_avbtool"]
if partition_name == "recovery":
part_size = info_dict["recovery_size"]
else:
part_size = info_dict[image_name.replace(".img","_size")]
cmd = [avbtool, "add_hash_footer", "--image", img.name,
"--partition_size", str(part_size), "--partition_name",
partition_name]
AppendAVBSigningArgs(cmd, partition_name)
args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
if args and args.strip():
cmd.extend(shlex.split(args))
RunAndCheckOutput(cmd)
  img.seek(0, os.SEEK_SET)
data = img.read()
if has_ramdisk:
ramdisk_img.close()
img.close()
return data
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
info_dict=None, two_step_image=False):
"""Return a File object with the desired bootable image.
Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
the source files in 'unpack_dir'/'tree_subdir'."""
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
logger.info("building image from target_files %s...", tree_subdir)
if info_dict is None:
info_dict = OPTIONS.info_dict
# With system_root_image == "true", we don't pack ramdisk into the boot image.
# Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
# for recovery.
has_ramdisk = (info_dict.get("system_root_image") != "true" or
prebuilt_name != "boot.img" or
info_dict.get("recovery_as_boot") == "true")
fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir),
os.path.join(unpack_dir, fs_config),
info_dict, has_ramdisk, two_step_image)
if data:
return File(name, data)
return None
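# Illustrative call (argument values assumed): builds boot.img from the BOOT/
# subdirectory of an unpacked target_files when no prebuilt image is found.
#   boot_img = GetBootableImage("IMAGES/boot.img", "boot.img", unpack_dir, "BOOT")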
def _BuildVendorBootImage(sourcedir, info_dict=None):
"""Build a vendor boot image from the specified sourcedir.
Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
turn them into a vendor boot image.
  Return the image data, or None if sourcedir does not appear to contain files
for building the requested image.
"""
if info_dict is None:
info_dict = OPTIONS.info_dict
img = tempfile.NamedTemporaryFile()
use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
use_xz = info_dict.get("xz_ramdisks") == 'true'
ramdisk_img = _MakeRamdisk(sourcedir, lz4_ramdisks=use_lz4, xz_ramdisks=use_xz)
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
cmd = [mkbootimg]
fn = os.path.join(sourcedir, "dtb")
if os.access(fn, os.F_OK):
cmd.append("--dtb")
cmd.append(fn)
fn = os.path.join(sourcedir, "vendor_cmdline")
if os.access(fn, os.F_OK):
cmd.append("--vendor_cmdline")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "base")
if os.access(fn, os.F_OK):
cmd.append("--base")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "pagesize")
if os.access(fn, os.F_OK):
cmd.append("--pagesize")
cmd.append(open(fn).read().rstrip("\n"))
args = info_dict.get("mkbootimg_args")
if args and args.strip():
cmd.extend(shlex.split(args))
args = info_dict.get("mkbootimg_version_args")
if args and args.strip():
cmd.extend(shlex.split(args))
cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
cmd.extend(["--vendor_boot", img.name])
RunAndCheckOutput(cmd)
# AVB: if enabled, calculate and add hash.
if info_dict.get("avb_enable") == "true":
avbtool = info_dict["avb_avbtool"]
part_size = info_dict["vendor_boot_size"]
cmd = [avbtool, "add_hash_footer", "--image", img.name,
"--partition_size", str(part_size), "--partition_name", "vendor_boot"]
AppendAVBSigningArgs(cmd, "vendor_boot")
args = info_dict.get("avb_vendor_boot_add_hash_footer_args")
if args and args.strip():
cmd.extend(shlex.split(args))
RunAndCheckOutput(cmd)
  img.seek(0, os.SEEK_SET)
data = img.read()
ramdisk_img.close()
img.close()
return data
def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
info_dict=None):
"""Return a File object with the desired vendor boot image.
Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
the source files in 'unpack_dir'/'tree_subdir'."""
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
logger.info("building image from target_files %s...", tree_subdir)
if info_dict is None:
info_dict = OPTIONS.info_dict
data = _BuildVendorBootImage(os.path.join(unpack_dir, tree_subdir), info_dict)
if data:
return File(name, data)
return None
def Gunzip(in_filename, out_filename):
"""Gunzips the given gzip compressed file to a given output file."""
with gzip.open(in_filename, "rb") as in_file, \
open(out_filename, "wb") as out_file:
shutil.copyfileobj(in_file, out_file)
def UnzipToDir(filename, dirname, patterns=None):
"""Unzips the archive to the given directory.
Args:
filename: The name of the zip file to unzip.
    dirname: Where the unzipped files will land.
patterns: Files to unzip from the archive. If omitted, will unzip the entire
        archive. Non-matching patterns will be filtered out. If there's no match
after the filtering, no file will be unzipped.
"""
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if patterns is not None:
# Filter out non-matching patterns. unzip will complain otherwise.
with zipfile.ZipFile(filename) as input_zip:
names = input_zip.namelist()
filtered = [
pattern for pattern in patterns if fnmatch.filter(names, pattern)]
    # There are no matching files. Don't unzip anything.
if not filtered:
return
cmd.extend(filtered)
RunAndCheckOutput(cmd)
def UnzipTemp(filename, pattern=None):
"""Unzips the given archive into a temporary directory and returns the name.
Args:
filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
pattern: Files to unzip from the archive. If omitted, will unzip the entire
        archive.
Returns:
The name of the temporary directory.
"""
tmp = MakeTempDir(prefix="targetfiles-")
m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
if m:
UnzipToDir(m.group(1), tmp, pattern)
UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), pattern)
filename = m.group(1)
else:
UnzipToDir(filename, tmp, pattern)
return tmp
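# Example (file names assumed): unzip only the IMAGES/ entries of a
# target_files archive into a temp dir that Cleanup() will remove later.
#   tmp = UnzipTemp("target_files.zip", ["IMAGES/*"])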
def GetUserImage(which, tmpdir, input_zip,
info_dict=None,
allow_shared_blocks=None,
hashtree_info_generator=None,
reset_file_map=False):
"""Returns an Image object suitable for passing to BlockImageDiff.
This function loads the specified image from the given path. If the specified
image is sparse, it also performs additional processing for OTA purpose. For
example, it always adds block 0 to clobbered blocks list. It also detects
  files that cannot be reconstructed from the block list, for which we should
avoid applying imgdiff.
Args:
which: The partition name.
tmpdir: The directory that contains the prebuilt image and block map file.
input_zip: The target-files ZIP archive.
info_dict: The dict to be looked up for relevant info.
allow_shared_blocks: If image is sparse, whether having shared blocks is
allowed. If none, it is looked up from info_dict.
hashtree_info_generator: If present and image is sparse, generates the
hashtree_info for this sparse image.
reset_file_map: If true and image is sparse, reset file map before returning
the image.
Returns:
    An Image object. If it is a sparse image and reset_file_map is False, the
image will have file_map info loaded.
"""
if info_dict is None:
info_dict = LoadInfoDict(input_zip)
is_sparse = info_dict.get("extfs_sparse_flag")
# When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
# shared blocks (i.e. some blocks will show up in multiple files' block
# list). We can only allocate such shared blocks to the first "owner", and
# disable imgdiff for all later occurrences.
if allow_shared_blocks is None:
allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
if is_sparse:
img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
hashtree_info_generator)
if reset_file_map:
img.ResetFileMap()
return img
else:
return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
"""Returns a Image object suitable for passing to BlockImageDiff.
This function loads the specified non-sparse image from the given path.
Args:
which: The partition name.
tmpdir: The directory that contains the prebuilt image and block map file.
Returns:
    An Image object.
"""
path = os.path.join(tmpdir, "IMAGES", which + ".img")
mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
# The image and map files must have been created prior to calling
# ota_from_target_files.py (since LMP).
assert os.path.exists(path) and os.path.exists(mappath)
return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)
def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
hashtree_info_generator=None):
"""Returns a SparseImage object suitable for passing to BlockImageDiff.
This function loads the specified sparse image from the given path, and
performs additional processing for OTA purpose. For example, it always adds
block 0 to clobbered blocks list. It also detects files that cannot be
  reconstructed from the block list, for which we should avoid applying imgdiff.
Args:
which: The partition name, e.g. "system", "vendor".
tmpdir: The directory that contains the prebuilt image and block map file.
input_zip: The target-files ZIP archive.
allow_shared_blocks: Whether having shared blocks is allowed.
hashtree_info_generator: If present, generates the hashtree_info for this
sparse image.
Returns:
A SparseImage object, with file_map info loaded.
"""
path = os.path.join(tmpdir, "IMAGES", which + ".img")
mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
# The image and map files must have been created prior to calling
# ota_from_target_files.py (since LMP).
assert os.path.exists(path) and os.path.exists(mappath)
  # In ext4 filesystems, block 0 might be changed even when mounted R/O. We add
# it to clobbered_blocks so that it will be written to the target
# unconditionally. Note that they are still part of care_map. (Bug: 20939131)
clobbered_blocks = "0"
image = sparse_img.SparseImage(
path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
hashtree_info_generator=hashtree_info_generator)
  # block.map may contain fewer blocks, because mke2fs may skip allocating blocks
# if they contain all zeros. We can't reconstruct such a file from its block
# list. Tag such entries accordingly. (Bug: 65213616)
for entry in image.file_map:
# Skip artificial names, such as "__ZERO", "__NONZERO-1".
if not entry.startswith('/'):
continue
# "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
# filename listed in system.map may contain an additional leading slash
# (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
# results.
arcname = entry.replace(which, which.upper(), 1).lstrip('/')
    # Special handling for another case, where files not under /system
# (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
if which == 'system' and not arcname.startswith('SYSTEM'):
arcname = 'ROOT/' + arcname
assert arcname in input_zip.namelist(), \
"Failed to find the ZIP entry for {}".format(entry)
info = input_zip.getinfo(arcname)
ranges = image.file_map[entry]
# If a RangeSet has been tagged as using shared blocks while loading the
# image, check the original block list to determine its completeness. Note
# that the 'incomplete' flag would be tagged to the original RangeSet only.
if ranges.extra.get('uses_shared_blocks'):
ranges = ranges.extra['uses_shared_blocks']
if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
ranges.extra['incomplete'] = True
return image
def GetKeyPasswords(keylist):
"""Given a list of keys, prompt the user to enter passwords for
  those which require them. Return a {key: password} dict; the password
will be None if the key has no password."""
no_passwords = []
need_passwords = []
key_passwords = {}
devnull = open("/dev/null", "w+b")
for k in sorted(keylist):
# We don't need a password for things that aren't really keys.
if k in SPECIAL_CERT_STRINGS:
no_passwords.append(k)
continue
p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
"-inform", "DER", "-nocrypt"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode == 0:
# Definitely an unencrypted key.
no_passwords.append(k)
else:
p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
"-inform", "DER", "-passin", "pass:"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.PIPE)
_, stderr = p.communicate()
if p.returncode == 0:
# Encrypted key with empty string as password.
key_passwords[k] = ''
elif stderr.startswith('Error decrypting key'):
# Definitely encrypted key.
# It would have said "Error reading key" if it didn't parse correctly.
need_passwords.append(k)
else:
# Potentially, a type of key that openssl doesn't understand.
# We'll let the routines in signapk.jar handle it.
no_passwords.append(k)
devnull.close()
key_passwords.update(PasswordManager().GetPasswords(need_passwords))
key_passwords.update(dict.fromkeys(no_passwords))
return key_passwords
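# Sketch (key name assumed): only keys that openssl reports as encrypted are
# prompted for; unencrypted keys map to None.
#   passwords = GetKeyPasswords(["build/target/product/security/testkey"])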
def GetMinSdkVersion(apk_name):
"""Gets the minSdkVersion declared in the APK.
It calls 'aapt2' to query the embedded minSdkVersion from the given APK file.
This can be both a decimal number (API Level) or a codename.
Args:
apk_name: The APK filename.
Returns:
The parsed SDK version string.
Raises:
ExternalError: On failing to obtain the min SDK version.
"""
proc = Run(
["aapt2", "dump", "badging", apk_name], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
if proc.returncode != 0:
raise ExternalError(
"Failed to obtain minSdkVersion: aapt2 return code {}:\n{}\n{}".format(
proc.returncode, stdoutdata, stderrdata))
for line in stdoutdata.split("\n"):
# Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
m = re.match(r'sdkVersion:\'([^\']*)\'', line)
if m:
return m.group(1)
raise ExternalError("No minSdkVersion returned by aapt2")
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
"""Returns the minSdkVersion declared in the APK as a number (API Level).
If minSdkVersion is set to a codename, it is translated to a number using the
provided map.
Args:
apk_name: The APK filename.
Returns:
The parsed SDK version number.
Raises:
ExternalError: On failing to get the min SDK version number.
"""
version = GetMinSdkVersion(apk_name)
try:
return int(version)
except ValueError:
# Not a decimal number. Codename?
if version in codename_to_api_level_map:
return codename_to_api_level_map[version]
else:
raise ExternalError(
"Unknown minSdkVersion: '{}'. Known codenames: {}".format(
version, codename_to_api_level_map))
def SignFile(input_name, output_name, key, password, min_api_level=None,
codename_to_api_level_map=None, whole_file=False,
extra_signapk_args=None):
"""Sign the input_name zip/jar/apk, producing output_name. Use the
given key and password (the latter may be None if the key does not
  have a password).
If whole_file is true, use the "-w" option to SignApk to embed a
signature that covers the whole file in the archive comment of the
zip file.
min_api_level is the API Level (int) of the oldest platform this file may end
up on. If not specified for an APK, the API Level is obtained by interpreting
the minSdkVersion attribute of the APK's AndroidManifest.xml.
codename_to_api_level_map is needed to translate the codename which may be
encountered as the APK's minSdkVersion.
Caller may optionally specify extra args to be passed to SignApk, which
defaults to OPTIONS.extra_signapk_args if omitted.
"""
if codename_to_api_level_map is None:
codename_to_api_level_map = {}
if extra_signapk_args is None:
extra_signapk_args = OPTIONS.extra_signapk_args
java_library_path = os.path.join(
OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
["-Djava.library.path=" + java_library_path,
"-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
extra_signapk_args)
if whole_file:
cmd.append("-w")
min_sdk_version = min_api_level
if min_sdk_version is None:
if not whole_file:
min_sdk_version = GetMinSdkVersionInt(
input_name, codename_to_api_level_map)
if min_sdk_version is not None:
cmd.extend(["--min-sdk-version", str(min_sdk_version)])
cmd.extend([key + OPTIONS.public_key_suffix,
key + OPTIONS.private_key_suffix,
input_name, output_name])
proc = Run(cmd, stdin=subprocess.PIPE)
if password is not None:
password += "\n"
stdoutdata, _ = proc.communicate(password)
if proc.returncode != 0:
raise ExternalError(
"Failed to run signapk.jar: return code {}:\n{}".format(
proc.returncode, stdoutdata))
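# Illustrative invocation (key path assumed; SignFile appends the configured
# public/private key suffixes itself):
#   SignFile("unsigned.apk", "signed.apk",
#            "build/target/product/security/platform", password=None)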
def CheckSize(data, target, info_dict):
"""Checks the data string passed against the max size limit.
  For non-AVB images, raise an exception if the data is too big. Print a warning
if the data is nearing the maximum size.
For AVB images, the actual image size should be identical to the limit.
Args:
data: A string that contains all the data for the partition.
target: The partition name. The ".img" suffix is optional.
info_dict: The dict to be looked up for relevant info.
"""
if target.endswith(".img"):
target = target[:-4]
mount_point = "/" + target
fs_type = None
limit = None
if info_dict["fstab"]:
if mount_point == "/userdata":
mount_point = "/data"
p = info_dict["fstab"][mount_point]
fs_type = p.fs_type
device = p.device
if "/" in device:
device = device[device.rfind("/")+1:]
limit = info_dict.get(device + "_size")
if not fs_type or not limit:
return
size = len(data)
# target could be 'userdata' or 'cache'. They should follow the non-AVB image
# path.
if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
if size != limit:
raise ExternalError(
"Mismatching image size for %s: expected %d actual %d" % (
target, limit, size))
else:
pct = float(size) * 100.0 / limit
msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
logger.warning("\n WARNING: %s\n", msg)
else:
logger.info(" %s", msg)
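# Example (names assumed): 'data' holds the full contents of the built image.
#   CheckSize(data, "system.img", OPTIONS.info_dict)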
def ReadApkCerts(tf_zip):
"""Parses the APK certs info from a given target-files zip.
Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a
tuple with the following elements: (1) a dictionary that maps packages to
  certs (based on the "certificate" and "private_key" attributes in the file);
  (2) a string representing the extension of compressed APKs in the target files
  (e.g. ".gz", ".bro").
Args:
tf_zip: The input target_files ZipFile (already open).
Returns:
(certmap, ext): certmap is a dictionary that maps packages to certs; ext is
the extension string of compressed APKs (e.g. ".gz"), or None if there's
no compressed APKs.
"""
certmap = {}
compressed_extension = None
# META/apkcerts.txt contains the info for _all_ the packages known at build
# time. Filter out the ones that are not installed.
installed_files = set()
for name in tf_zip.namelist():
basename = os.path.basename(name)
if basename:
installed_files.add(basename)
for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
line = line.strip()
if not line:
continue
m = re.match(
r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
r'(\s+partition="(?P<PARTITION>.*?)")?$',
line)
if not m:
continue
matches = m.groupdict()
cert = matches["CERT"]
privkey = matches["PRIVKEY"]
name = matches["NAME"]
this_compressed_extension = matches["COMPRESSED"]
public_key_suffix_len = len(OPTIONS.public_key_suffix)
private_key_suffix_len = len(OPTIONS.private_key_suffix)
if cert in SPECIAL_CERT_STRINGS and not privkey:
certmap[name] = cert
elif (cert.endswith(OPTIONS.public_key_suffix) and
privkey.endswith(OPTIONS.private_key_suffix) and
cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
certmap[name] = cert[:-public_key_suffix_len]
else:
raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)
if not this_compressed_extension:
continue
# Only count the installed files.
filename = name + '.' + this_compressed_extension
if filename not in installed_files:
continue
# Make sure that all the values in the compression map have the same
# extension. We don't support multiple compression methods in the same
# system image.
if compressed_extension:
if this_compressed_extension != compressed_extension:
raise ValueError(
"Multiple compressed extensions: {} vs {}".format(
compressed_extension, this_compressed_extension))
else:
compressed_extension = this_compressed_extension
return (certmap,
("." + compressed_extension) if compressed_extension else None)
COMMON_DOCSTRING = """
Global options
-p (--path) <dir>
Prepend <dir>/bin to the list of places to search for binaries run by this
script, and expect to find jars in <dir>/framework.
-s (--device_specific) <file>
Path to the Python module containing device-specific releasetools code.
-x (--extra) <key=value>
Add a key/value pair to the 'extras' dict, which device-specific extension
code may look at.
-v (--verbose)
Show command lines being executed.
-h (--help)
Display this usage message and exit.
--logfile <file>
      Put verbose logs to the specified file (regardless of the --verbose option).
"""
def Usage(docstring):
print(docstring.rstrip("\n"))
print(COMMON_DOCSTRING)
def ParseOptions(argv,
docstring,
extra_opts="", extra_long_opts=(),
extra_option_handler=None):
"""Parse the options in argv and return any arguments that aren't
flags. docstring is the calling module's docstring, to be displayed
for errors and -h. extra_opts and extra_long_opts are for flags
defined by the caller, which are processed by passing them to
extra_option_handler."""
try:
opts, args = getopt.getopt(
argv, "hvp:s:x:" + extra_opts,
["help", "verbose", "path=", "signapk_path=",
"signapk_shared_library_path=", "extra_signapk_args=",
"java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
"private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
"verity_signer_path=", "verity_signer_args=", "device_specific=",
"extra=", "logfile=", "aftl_server=", "aftl_key_path=",
"aftl_manufacturer_key_path=", "aftl_signer_helper="] +
list(extra_long_opts))
except getopt.GetoptError as err:
Usage(docstring)
print("**", str(err), "**")
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
Usage(docstring)
sys.exit()
elif o in ("-v", "--verbose"):
OPTIONS.verbose = True
elif o in ("-p", "--path"):
OPTIONS.search_path = a
elif o in ("--signapk_path",):
OPTIONS.signapk_path = a
elif o in ("--signapk_shared_library_path",):
OPTIONS.signapk_shared_library_path = a
elif o in ("--extra_signapk_args",):
OPTIONS.extra_signapk_args = shlex.split(a)
elif o in ("--java_path",):
OPTIONS.java_path = a
elif o in ("--java_args",):
OPTIONS.java_args = shlex.split(a)
elif o in ("--android_jar_path",):
OPTIONS.android_jar_path = a
elif o in ("--public_key_suffix",):
OPTIONS.public_key_suffix = a
elif o in ("--private_key_suffix",):
OPTIONS.private_key_suffix = a
elif o in ("--boot_signer_path",):
OPTIONS.boot_signer_path = a
elif o in ("--boot_signer_args",):
OPTIONS.boot_signer_args = shlex.split(a)
elif o in ("--verity_signer_path",):
OPTIONS.verity_signer_path = a
elif o in ("--verity_signer_args",):
OPTIONS.verity_signer_args = shlex.split(a)
elif o in ("--aftl_server",):
OPTIONS.aftl_server = a
elif o in ("--aftl_key_path",):
OPTIONS.aftl_key_path = a
elif o in ("--aftl_manufacturer_key_path",):
OPTIONS.aftl_manufacturer_key_path = a
elif o in ("--aftl_signer_helper",):
OPTIONS.aftl_signer_helper = a
elif o in ("-s", "--device_specific"):
OPTIONS.device_specific = a
elif o in ("-x", "--extra"):
key, value = a.split("=", 1)
OPTIONS.extras[key] = value
elif o in ("--logfile",):
OPTIONS.logfile = a
else:
if extra_option_handler is None or not extra_option_handler(o, a):
assert False, "unknown option \"%s\"" % (o,)
if OPTIONS.search_path:
os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
os.pathsep + os.environ["PATH"])
return args
def MakeTempFile(prefix='tmp', suffix=''):
"""Make a temp file and add it to the list of things to be deleted
when Cleanup() is called. Return the filename."""
fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.close(fd)
OPTIONS.tempfiles.append(fn)
return fn
def MakeTempDir(prefix='tmp', suffix=''):
"""Makes a temporary dir that will be cleaned up with a call to Cleanup().
Returns:
The absolute pathname of the new directory.
"""
dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
OPTIONS.tempfiles.append(dir_name)
return dir_name
def Cleanup():
for i in OPTIONS.tempfiles:
if os.path.isdir(i):
shutil.rmtree(i, ignore_errors=True)
else:
os.remove(i)
del OPTIONS.tempfiles[:]
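# Typical lifecycle (sketch): temp paths are registered on OPTIONS.tempfiles
# and removed together by Cleanup() once the tool is done.
#   tmp_img = MakeTempFile(prefix="boot-", suffix=".img")
#   work_dir = MakeTempDir()
#   ...  # use the temp paths
#   Cleanup()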
class PasswordManager(object):
def __init__(self):
self.editor = os.getenv("EDITOR")
self.pwfile = os.getenv("ANDROID_PW_FILE")
def GetPasswords(self, items):
"""Get passwords corresponding to each string in 'items',
returning a dict. (The dict may have keys in addition to the
values in 'items'.)
Uses the passwords in $ANDROID_PW_FILE if available, letting the
user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
interactively in the ordinary way.
"""
current = self.ReadFile()
first = True
while True:
missing = []
for i in items:
if i not in current or not current[i]:
missing.append(i)
# Are all the passwords already in the file?
if not missing:
return current
for i in missing:
current[i] = ""
if not first:
print("key file %s still missing some passwords." % (self.pwfile,))
if sys.version_info[0] >= 3:
raw_input = input # pylint: disable=redefined-builtin
answer = raw_input("try to edit again? [y]> ").strip()
if answer and answer[0] not in 'yY':
raise RuntimeError("key passwords unavailable")
first = False
current = self.UpdateAndReadFile(current)
def PromptResult(self, current): # pylint: disable=no-self-use
"""Prompt the user to enter a value (password) for each key in
    'current' whose value is false. Returns a new dict with all the
values.
"""
result = {}
for k, v in sorted(current.items()):
if v:
result[k] = v
else:
while True:
result[k] = getpass.getpass(
"Enter password for %s key> " % k).strip()
if result[k]:
break
return result
def UpdateAndReadFile(self, current):
if not self.editor or not self.pwfile:
return self.PromptResult(current)
f = open(self.pwfile, "w")
os.chmod(self.pwfile, 0o600)
f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
for i, (_, k, v) in enumerate(sorted_list):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
# position cursor on first line with no password.
first_line = i + 4
f.close()
RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])
return self.ReadFile()
def ReadFile(self):
result = {}
if self.pwfile is None:
return result
try:
f = open(self.pwfile, "r")
for line in f:
line = line.strip()
if not line or line[0] == '#':
continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
logger.warning("Failed to parse password file: %s", line)
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError as e:
if e.errno != errno.ENOENT:
logger.exception("Error reading password file:")
return result
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
compress_type=None):
import datetime
# http://b/18015246
# Python 2.7's zipfile implementation wrongly thinks that zip64 is required
# for files larger than 2GiB. We can work around this by adjusting their
# limit. Note that `zipfile.writestr()` will not work for strings larger than
# 2GiB. The Python interpreter sometimes rejects strings that large (though
# it isn't clear to me exactly what circumstances cause this).
# `zipfile.write()` must be used directly to work around this.
#
# This mess can be avoided if we port to python3.
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
if compress_type is None:
compress_type = zip_file.compression
if arcname is None:
arcname = filename
saved_stat = os.stat(filename)
try:
# `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
# file to be zipped and reset it when we're done.
os.chmod(filename, perms)
# Use a fixed timestamp so the output is repeatable.
# Note: Use of fromtimestamp rather than utcfromtimestamp here is
# intentional. zip stores datetimes in local time without a time zone
# attached, so we need "epoch" but in the local time zone to get 2009/01/01
# in the zip archive.
local_epoch = datetime.datetime.fromtimestamp(0)
timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
os.utime(filename, (timestamp, timestamp))
zip_file.write(filename, arcname=arcname, compress_type=compress_type)
finally:
os.chmod(filename, saved_stat.st_mode)
os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
zipfile.ZIP64_LIMIT = saved_zip64_limit
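# Example (names assumed): ZipWrite uses a fixed timestamp and the requested
# permissions so the resulting archive is reproducible.
#   with zipfile.ZipFile("out.zip", "w") as z:
#     ZipWrite(z, "payload.bin", arcname="payload.bin", perms=0o644)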
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
compress_type=None):
"""Wrap zipfile.writestr() function to work around the zip64 limit.
Even with the ZIP64_LIMIT workaround, it won't allow writing a string
longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
when calling crc32(bytes).
But it still works fine to write a shorter string into a large zip file.
We should use ZipWrite() whenever possible, and only use ZipWriteStr()
when we know the string won't be too long.
"""
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
zinfo.compress_type = zip_file.compression
if perms is None:
perms = 0o100644
else:
zinfo = zinfo_or_arcname
# Python 2 and 3 behave differently when calling ZipFile.writestr() with
# zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value for
# such a case (since
# https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
# which seems to make more sense. Otherwise the entry will have 0o000 as the
# permission bits. We follow the logic in Python 3 to get consistent
# behavior between using the two versions.
if not zinfo.external_attr:
zinfo.external_attr = 0o600 << 16
# If compress_type is given, it overrides the value in zinfo.
if compress_type is not None:
zinfo.compress_type = compress_type
# If perms is given, it has a priority.
if perms is not None:
# If perms doesn't set the file type, mark it as a regular file.
if perms & 0o770000 == 0:
perms |= 0o100000
zinfo.external_attr = perms << 16
# Use a fixed timestamp so the output is repeatable.
zinfo.date_time = (2009, 1, 1, 0, 0, 0)
zip_file.writestr(zinfo, data)
zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipDelete(zip_filename, entries):
"""Deletes entries from a ZIP file.
Since deleting entries from a ZIP file is not supported, it shells out to
'zip -d'.
Args:
zip_filename: The name of the ZIP file.
entries: The name of the entry, or the list of names to be deleted.
Raises:
AssertionError: In case of non-zero return from 'zip'.
"""
if isinstance(entries, str):
entries = [entries]
cmd = ["zip", "-d", zip_filename] + entries
RunAndCheckOutput(cmd)
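# Example (archive and entry names assumed):
#   ZipDelete("ota_update.zip", "META-INF/com/android/metadata")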
def ZipClose(zip_file):
# http://b/18015246
# zipfile also refers to ZIP64_LIMIT during close() when it writes out the
# central directory.
saved_zip64_limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = (1 << 32) - 1
zip_file.close()
zipfile.ZIP64_LIMIT = saved_zip64_limit
class DeviceSpecificParams(object):
module = None
def __init__(self, **kwargs):
"""Keyword arguments to the constructor become attributes of this
object, which is passed to all functions in the device-specific
module."""
for k, v in kwargs.items():
setattr(self, k, v)
self.extras = OPTIONS.extras
if self.module is None:
path = OPTIONS.device_specific
if not path:
return
try:
if os.path.isdir(path):
info = imp.find_module("releasetools", [path])
else:
d, f = os.path.split(path)
b, x = os.path.splitext(f)
if x == ".py":
f = b
info = imp.find_module(f, [d])
logger.info("loaded device-specific extensions from %s", path)
self.module = imp.load_module("device_specific", *info)
except ImportError:
logger.info("unable to load device-specific module; assuming none")
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
the given args and kwargs. The first argument to the call will be
the DeviceSpecific object itself. If there is no module, or the
module does not define the function, return the value of the
'default' kwarg (which itself defaults to None)."""
if self.module is None or not hasattr(self.module, function_name):
return kwargs.get("default")
return getattr(self.module, function_name)(*((self,) + args), **kwargs)
def FullOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of a
full OTA package. Implementations can add whatever additional
assertions they like."""
return self._DoCall("FullOTA_Assertions")
def FullOTA_InstallBegin(self):
"""Called at the start of full OTA installation."""
return self._DoCall("FullOTA_InstallBegin")
def FullOTA_GetBlockDifferences(self):
"""Called during full OTA installation and verification.
Implementation should return a list of BlockDifference objects describing
    the update on each additional partition.
"""
return self._DoCall("FullOTA_GetBlockDifferences")
def FullOTA_InstallEnd(self):
"""Called at the end of full OTA installation; typically this is
used to install the image for the device's baseband processor."""
return self._DoCall("FullOTA_InstallEnd")
def FullOTA_PostValidate(self):
"""Called after installing and validating /system; typically this is
used to resize the system partition after a block based installation."""
return self._DoCall("FullOTA_PostValidate")
def IncrementalOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of an
incremental OTA package. Implementations can add whatever
additional assertions they like."""
return self._DoCall("IncrementalOTA_Assertions")
def IncrementalOTA_VerifyBegin(self):
"""Called at the start of the verification phase of incremental
OTA installation; additional checks can be placed here to abort
the script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyBegin")
def IncrementalOTA_VerifyEnd(self):
"""Called at the end of the verification phase of incremental OTA
installation; additional checks can be placed here to abort the
script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyEnd")
def IncrementalOTA_InstallBegin(self):
"""Called at the start of incremental OTA installation (after
verification is complete)."""
return self._DoCall("IncrementalOTA_InstallBegin")
def IncrementalOTA_GetBlockDifferences(self):
"""Called during incremental OTA installation and verification.
Implementation should return a list of BlockDifference objects describing
    the update on each additional partition.
"""
return self._DoCall("IncrementalOTA_GetBlockDifferences")
def IncrementalOTA_InstallEnd(self):
"""Called at the end of incremental OTA installation; typically
this is used to install the image for the device's baseband
processor."""
return self._DoCall("IncrementalOTA_InstallEnd")
def VerifyOTA_Assertions(self):
return self._DoCall("VerifyOTA_Assertions")
class File(object):
def __init__(self, name, data, compress_size=None):
self.name = name
self.data = data
self.size = len(data)
self.compress_size = compress_size or self.size
self.sha1 = sha1(data).hexdigest()
@classmethod
def FromLocalFile(cls, name, diskname):
f = open(diskname, "rb")
data = f.read()
f.close()
return File(name, data)
def WriteToTemp(self):
t = tempfile.NamedTemporaryFile()
t.write(self.data)
t.flush()
return t
def WriteToDir(self, d):
with open(os.path.join(d, self.name), "wb") as fp:
fp.write(self.data)
def AddToZip(self, z, compression=None):
ZipWriteStr(z, self.name, self.data, compress_type=compression)
DIFF_PROGRAM_BY_EXT = {
".gz" : "imgdiff",
".zip" : ["imgdiff", "-z"],
".jar" : ["imgdiff", "-z"],
".apk" : ["imgdiff", "-z"],
".img" : "imgdiff",
}
class Difference(object):
def __init__(self, tf, sf, diff_program=None):
self.tf = tf
self.sf = sf
self.patch = None
self.diff_program = diff_program
def ComputePatch(self):
"""Compute the patch (as a string of data) needed to turn sf into
tf. Returns the same tuple as GetPatch()."""
tf = self.tf
sf = self.sf
if self.diff_program:
diff_program = self.diff_program
else:
ext = os.path.splitext(tf.name)[1]
diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
ttemp = tf.WriteToTemp()
stemp = sf.WriteToTemp()
ext = os.path.splitext(tf.name)[1]
try:
ptemp = tempfile.NamedTemporaryFile()
if isinstance(diff_program, list):
cmd = copy.copy(diff_program)
else:
cmd = [diff_program]
cmd.append(stemp.name)
cmd.append(ttemp.name)
cmd.append(ptemp.name)
p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
err = []
def run():
_, e = p.communicate()
if e:
err.append(e)
th = threading.Thread(target=run)
th.start()
th.join(timeout=300) # 5 mins
if th.is_alive():
logger.warning("diff command timed out")
p.terminate()
th.join(5)
if th.is_alive():
p.kill()
th.join()
if p.returncode != 0:
logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
self.patch = None
return None, None, None
diff = ptemp.read()
finally:
ptemp.close()
stemp.close()
ttemp.close()
self.patch = diff
return self.tf, self.sf, self.patch
def GetPatch(self):
"""Returns a tuple of (target_file, source_file, patch_data).
patch_data may be None if ComputePatch hasn't been called, or if
computing the patch failed.
"""
return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
logger.info("%d diffs to compute", len(diffs))
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
by_size.sort(reverse=True)
by_size = [i[1] for i in by_size]
lock = threading.Lock()
diff_iter = iter(by_size) # accessed under lock
def worker():
try:
lock.acquire()
for d in diff_iter:
lock.release()
start = time.time()
d.ComputePatch()
dur = time.time() - start
lock.acquire()
tf, sf, patch = d.GetPatch()
if sf.name == tf.name:
name = tf.name
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
logger.error("patching failed! %40s", name)
else:
logger.info(
"%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
tf.size, 100.0 * len(patch) / tf.size, name)
lock.release()
except Exception:
logger.exception("Failed to compute diff from worker")
raise
# start worker threads; wait for them all to finish.
threads = [threading.Thread(target=worker)
for i in range(OPTIONS.worker_threads)]
for th in threads:
th.start()
while threads:
threads.pop().join()
class BlockDifference(object):
def __init__(self, partition, tgt, src=None, check_first_block=False,
version=None, disable_imgdiff=False):
self.tgt = tgt
self.src = src
self.partition = partition
self.check_first_block = check_first_block
self.disable_imgdiff = disable_imgdiff
if version is None:
version = max(
int(i) for i in
OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
assert version >= 3
self.version = version
b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
version=self.version,
disable_imgdiff=self.disable_imgdiff)
self.path = os.path.join(MakeTempDir(), partition)
b.Compute(self.path)
self._required_cache = b.max_stashed_size
self.touched_src_ranges = b.touched_src_ranges
self.touched_src_sha1 = b.touched_src_sha1
# On devices with dynamic partitions, for new partitions,
# src is None but OPTIONS.source_info_dict is not.
if OPTIONS.source_info_dict is None:
is_dynamic_build = OPTIONS.info_dict.get(
"use_dynamic_partitions") == "true"
is_dynamic_source = False
else:
is_dynamic_build = OPTIONS.source_info_dict.get(
"use_dynamic_partitions") == "true"
is_dynamic_source = partition in shlex.split(
OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
is_dynamic_target = partition in shlex.split(
OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
# For dynamic partitions builds, check partition list in both source
# and target build because new partitions may be added, and existing
# partitions may be removed.
is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
if is_dynamic:
self.device = 'map_partition("%s")' % partition
else:
if OPTIONS.source_info_dict is None:
_, device_expr = GetTypeAndDeviceExpr("/" + partition,
OPTIONS.info_dict)
else:
_, device_expr = GetTypeAndDeviceExpr("/" + partition,
OPTIONS.source_info_dict)
self.device = device_expr
@property
def required_cache(self):
return self._required_cache
def WriteScript(self, script, output_zip, progress=None,
write_verify_script=False):
if not self.src:
# write the output unconditionally
script.Print("Patching %s image unconditionally..." % (self.partition,))
else:
script.Print("Patching %s image after verification." % (self.partition,))
if progress:
script.ShowProgress(progress, 0)
self._WriteUpdate(script, output_zip)
if write_verify_script:
self.WritePostInstallVerifyScript(script)
def WriteStrictVerifyScript(self, script):
"""Verify all the blocks in the care_map, including clobbered blocks.
This differs from the WriteVerifyScript() function: a) it prints different
error messages; b) it doesn't allow half-way updated images to pass the
verification."""
partition = self.partition
script.Print("Verifying %s..." % (partition,))
ranges = self.tgt.care_map
ranges_str = ranges.to_string_raw()
script.AppendExtra(
'range_sha1(%s, "%s") == "%s" && ui_print(" Verified.") || '
'ui_print("%s has unexpected contents.");' % (
self.device, ranges_str,
self.tgt.TotalSha1(include_clobbered_blocks=True),
self.partition))
script.AppendExtra("")
def WriteVerifyScript(self, script, touched_blocks_only=False):
partition = self.partition
# full OTA
if not self.src:
script.Print("Image %s will be patched unconditionally." % (partition,))
# incremental OTA
else:
if touched_blocks_only:
ranges = self.touched_src_ranges
expected_sha1 = self.touched_src_sha1
else:
ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
expected_sha1 = self.src.TotalSha1()
# No blocks to be checked, skipping.
if not ranges:
return
ranges_str = ranges.to_string_raw()
script.AppendExtra(
'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
'package_extract_file("%s.transfer.list"), "%s.new.dat", '
'"%s.patch.dat")) then' % (
self.device, ranges_str, expected_sha1,
self.device, partition, partition, partition))
script.Print('Verified %s image...' % (partition,))
script.AppendExtra('else')
if self.version >= 4:
# Bug: 21124327
# When generating incrementals for the system and vendor partitions in
# version 4 or newer, explicitly check the first block (which contains
# the superblock) of the partition to see if it's what we expect. If
# this check fails, give an explicit log message about the partition
# having been remounted R/W (the most likely explanation).
if self.check_first_block:
script.AppendExtra('check_first_block(%s);' % (self.device,))
# If version >= 4, try block recovery before abort update
if partition == "system":
code = ErrorCode.SYSTEM_RECOVER_FAILURE
else:
code = ErrorCode.VENDOR_RECOVER_FAILURE
script.AppendExtra((
'ifelse (block_image_recover({device}, "{ranges}") && '
'block_image_verify({device}, '
'package_extract_file("{partition}.transfer.list"), '
'"{partition}.new.dat", "{partition}.patch.dat"), '
'ui_print("{partition} recovered successfully."), '
'abort("E{code}: {partition} partition fails to recover"));\n'
'endif;').format(device=self.device, ranges=ranges_str,
partition=partition, code=code))
# Abort the OTA update. Note that the incremental OTA cannot be applied
# even if it may match the checksum of the target partition.
# a) If version < 3, operations like move and erase will make changes
# unconditionally and damage the partition.
# b) If version >= 3, it won't even reach here.
else:
if partition == "system":
code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
else:
code = ErrorCode.VENDOR_VERIFICATION_FAILURE
script.AppendExtra((
'abort("E%d: %s partition has unexpected contents");\n'
'endif;') % (code, partition))
def WritePostInstallVerifyScript(self, script):
partition = self.partition
script.Print('Verifying the updated %s image...' % (partition,))
# Unlike pre-install verification, clobbered_blocks should not be ignored.
ranges = self.tgt.care_map
ranges_str = ranges.to_string_raw()
script.AppendExtra(
'if range_sha1(%s, "%s") == "%s" then' % (
self.device, ranges_str,
self.tgt.TotalSha1(include_clobbered_blocks=True)))
# Bug: 20881595
# Verify that extended blocks are really zeroed out.
if self.tgt.extended:
ranges_str = self.tgt.extended.to_string_raw()
script.AppendExtra(
'if range_sha1(%s, "%s") == "%s" then' % (
self.device, ranges_str,
self._HashZeroBlocks(self.tgt.extended.size())))
script.Print('Verified the updated %s image.' % (partition,))
if partition == "system":
code = ErrorCode.SYSTEM_NONZERO_CONTENTS
else:
code = ErrorCode.VENDOR_NONZERO_CONTENTS
script.AppendExtra(
'else\n'
' abort("E%d: %s partition has unexpected non-zero contents after '
'OTA update");\n'
'endif;' % (code, partition))
else:
script.Print('Verified the updated %s image.' % (partition,))
if partition == "system":
code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
else:
code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
script.AppendExtra(
'else\n'
' abort("E%d: %s partition has unexpected contents after OTA '
'update");\n'
'endif;' % (code, partition))
def _WriteUpdate(self, script, output_zip):
ZipWrite(output_zip,
'{}.transfer.list'.format(self.path),
'{}.transfer.list'.format(self.partition))
# For full OTA, compress the new.dat with brotli with quality 6 to reduce
    # its size. Quality 9 almost triples the compression time but doesn't
# further reduce the size too much. For a typical 1.8G system.new.dat
# zip | brotli(quality 6) | brotli(quality 9)
# compressed_size: 942M | 869M (~8% reduced) | 854M
# compression_time: 75s | 265s | 719s
# decompression_time: 15s | 25s | 25s
if not self.src:
brotli_cmd = ['brotli', '--quality=6',
'--output={}.new.dat.br'.format(self.path),
'{}.new.dat'.format(self.path)]
print("Compressing {}.new.dat with brotli".format(self.partition))
RunAndCheckOutput(brotli_cmd)
new_data_name = '{}.new.dat.br'.format(self.partition)
ZipWrite(output_zip,
'{}.new.dat.br'.format(self.path),
new_data_name,
compress_type=zipfile.ZIP_STORED)
else:
new_data_name = '{}.new.dat'.format(self.partition)
ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
ZipWrite(output_zip,
'{}.patch.dat'.format(self.path),
'{}.patch.dat'.format(self.partition),
compress_type=zipfile.ZIP_STORED)
if self.partition == "system":
code = ErrorCode.SYSTEM_UPDATE_FAILURE
else:
code = ErrorCode.VENDOR_UPDATE_FAILURE
call = ('block_image_update({device}, '
'package_extract_file("{partition}.transfer.list"), '
'"{new_data_name}", "{partition}.patch.dat") ||\n'
' abort("E{code}: Failed to update {partition} image.");'.format(
device=self.device, partition=self.partition,
new_data_name=new_data_name, code=code))
script.AppendExtra(script.WordWrap(call))
def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
data = source.ReadRangeSet(ranges)
ctx = sha1()
for p in data:
ctx.update(p)
return ctx.hexdigest()
def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
"""Return the hash value for all zero blocks."""
    zero_block = b'\x00' * 4096
ctx = sha1()
for _ in range(num_blocks):
ctx.update(zero_block)
return ctx.hexdigest()
# Expose these two classes to support vendor-specific scripts
DataImage = images.DataImage
EmptyImage = images.EmptyImage
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
"ext4": "EMMC",
"emmc": "EMMC",
"f2fs": "EMMC",
"squashfs": "EMMC"
}
def GetTypeAndDevice(mount_point, info, check_no_slot=True):
"""
Use GetTypeAndDeviceExpr whenever possible. This function is kept for
  backwards compatibility. It aborts if the fstab entry has a slotselect option
(unless check_no_slot is explicitly set to False).
"""
fstab = info["fstab"]
if fstab:
if check_no_slot:
assert not fstab[mount_point].slotselect, \
"Use GetTypeAndDeviceExpr instead"
return (PARTITION_TYPES[fstab[mount_point].fs_type],
fstab[mount_point].device)
else:
raise KeyError
def GetTypeAndDeviceExpr(mount_point, info):
"""
  Returns the partition type (as used by mount/format, e.g. "EMMC") and an edify
  expression that evaluates to the device path at runtime.
"""
fstab = info["fstab"]
if fstab:
p = fstab[mount_point]
device_expr = '"%s"' % fstab[mount_point].device
if p.slotselect:
device_expr = 'add_slot_suffix(%s)' % device_expr
return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
else:
raise KeyError
def GetEntryForDevice(fstab, device):
"""
Returns:
The first entry in fstab whose device is the given value.
"""
if not fstab:
return None
for mount_point in fstab:
if fstab[mount_point].device == device:
return fstab[mount_point]
return None
def ParseCertificate(data):
"""Parses and converts a PEM-encoded certificate into DER-encoded.
This gives the same result as `openssl x509 -in <filename> -outform DER`.
Returns:
The decoded certificate bytes.
"""
cert_buffer = []
save = False
for line in data.split("\n"):
if "--END CERTIFICATE--" in line:
break
if save:
cert_buffer.append(line)
if "--BEGIN CERTIFICATE--" in line:
save = True
cert = base64.b64decode("".join(cert_buffer))
return cert
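# Example (certificate path assumed):
#   with open("platform.x509.pem") as cert_file:
#     der_bytes = ParseCertificate(cert_file.read())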
def ExtractPublicKey(cert):
"""Extracts the public key (PEM-encoded) from the given certificate file.
Args:
cert: The certificate filename.
Returns:
The public key string.
Raises:
AssertionError: On non-zero return from 'openssl'.
"""
# The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
# While openssl 1.1 writes the key into the given filename followed by '-out',
# openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the output from
# stdout instead.
cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pubkey, stderrdata = proc.communicate()
assert proc.returncode == 0, \
'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
return pubkey
def ExtractAvbPublicKey(avbtool, key):
"""Extracts the AVB public key from the given public or private key.
Args:
avbtool: The AVB tool to use.
key: The input key file, which should be PEM-encoded public or private key.
Returns:
The path to the extracted AVB public key file.
"""
output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
RunAndCheckOutput(
[avbtool, 'extract_public_key', "--key", key, "--output", output])
return output
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
info_dict=None):
"""Generates the recovery-from-boot patch and writes the script to output.
Most of the space in the boot and recovery images is just the kernel, which is
identical for the two, so the resulting patch should be efficient. Add it to
the output zip, along with a shell script that is run from init.rc on first
boot to actually do the patching and install the new recovery image.
Args:
input_dir: The top-level input directory of the target-files.zip.
output_sink: The callback function that writes the result.
recovery_img: File object for the recovery image.
    boot_img: File object for the boot image.
info_dict: A dict returned by common.LoadInfoDict() on the input
target_files. Will use OPTIONS.info_dict if None has been given.
"""
if info_dict is None:
info_dict = OPTIONS.info_dict
full_recovery_image = info_dict.get("full_recovery_image") == "true"
board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
if board_uses_vendorimage:
# In this case, the output sink is rooted at VENDOR
recovery_img_path = "etc/recovery.img"
recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
else:
# In this case the output sink is rooted at SYSTEM
recovery_img_path = "vendor/etc/recovery.img"
recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
if full_recovery_image:
output_sink(recovery_img_path, recovery_img.data)
else:
system_root_image = info_dict.get("system_root_image") == "true"
path = os.path.join(input_dir, recovery_resource_dat_path)
# With system-root-image, boot and recovery images will have mismatching
# entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
# to handle such a case.
if system_root_image:
diff_program = ["bsdiff"]
bonus_args = ""
assert not os.path.exists(path)
else:
diff_program = ["imgdiff"]
if os.path.exists(path):
diff_program.append("-b")
diff_program.append(path)
bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
else:
bonus_args = ""
d = Difference(recovery_img, boot_img, diff_program=diff_program)
_, _, patch = d.ComputePatch()
output_sink("recovery-from-boot.p", patch)
try:
# The following GetTypeAndDevice()s need to use the path in the target
# info_dict instead of source_info_dict.
boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
check_no_slot=False)
recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
check_no_slot=False)
except KeyError:
return
if full_recovery_image:
# Note that we use /vendor to refer to the recovery resources. This will
# work for a separate vendor partition mounted at /vendor or a
# /system/vendor subdirectory on the system partition, for which init will
# create a symlink from /vendor to /system/vendor.
sh = """#!/vendor/bin/sh
if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
applypatch \\
--flash /vendor/etc/recovery.img \\
--target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
log -t recovery "Installing new recovery image: succeeded" || \\
log -t recovery "Installing new recovery image: failed"
else
log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
'device': recovery_device,
'sha1': recovery_img.sha1,
'size': recovery_img.size}
else:
sh = """#!/vendor/bin/sh
if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
applypatch %(bonus_args)s \\
--patch /vendor/recovery-from-boot.p \\
--source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
--target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
log -t recovery "Installing new recovery image: succeeded" || \\
log -t recovery "Installing new recovery image: failed"
else
log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
'boot_sha1': boot_img.sha1,
'recovery_size': recovery_img.size,
'recovery_sha1': recovery_img.sha1,
'boot_type': boot_type,
'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
'recovery_type': recovery_type,
'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
'bonus_args': bonus_args}
# The install script location moved from /system/etc to /system/bin in the L
# release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
output_sink("bin/install-recovery.sh", sh.encode())
class DynamicPartitionUpdate(object):
def __init__(self, src_group=None, tgt_group=None, progress=None,
block_difference=None):
self.src_group = src_group
self.tgt_group = tgt_group
self.progress = progress
self.block_difference = block_difference
@property
def src_size(self):
if not self.block_difference:
return 0
return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
@property
def tgt_size(self):
if not self.block_difference:
return 0
return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
@staticmethod
def _GetSparseImageSize(img):
if not img:
return 0
return img.blocksize * img.total_blocks
class DynamicGroupUpdate(object):
def __init__(self, src_size=None, tgt_size=None):
# None: group does not exist. 0: no size limits.
self.src_size = src_size
self.tgt_size = tgt_size
class DynamicPartitionsDifference(object):
def __init__(self, info_dict, block_diffs, progress_dict=None,
source_info_dict=None, build_without_vendor=False):
if progress_dict is None:
progress_dict = {}
self._build_without_vendor = build_without_vendor
self._remove_all_before_apply = False
if source_info_dict is None:
self._remove_all_before_apply = True
source_info_dict = {}
block_diff_dict = collections.OrderedDict(
[(e.partition, e) for e in block_diffs])
assert len(block_diff_dict) == len(block_diffs), \
"Duplicated BlockDifference object for {}".format(
[partition for partition, count in
collections.Counter(e.partition for e in block_diffs).items()
if count > 1])
self._partition_updates = collections.OrderedDict()
for p, block_diff in block_diff_dict.items():
self._partition_updates[p] = DynamicPartitionUpdate()
self._partition_updates[p].block_difference = block_diff
for p, progress in progress_dict.items():
if p in self._partition_updates:
self._partition_updates[p].progress = progress
tgt_groups = shlex.split(info_dict.get(
"super_partition_groups", "").strip())
src_groups = shlex.split(source_info_dict.get(
"super_partition_groups", "").strip())
for g in tgt_groups:
for p in shlex.split(info_dict.get(
"super_%s_partition_list" % g, "").strip()):
assert p in self._partition_updates, \
"{} is in target super_{}_partition_list but no BlockDifference " \
"object is provided.".format(p, g)
self._partition_updates[p].tgt_group = g
for g in src_groups:
for p in shlex.split(source_info_dict.get(
"super_%s_partition_list" % g, "").strip()):
assert p in self._partition_updates, \
"{} is in source super_{}_partition_list but no BlockDifference " \
"object is provided.".format(p, g)
self._partition_updates[p].src_group = g
target_dynamic_partitions = set(shlex.split(info_dict.get(
"dynamic_partition_list", "").strip()))
block_diffs_with_target = set(p for p, u in self._partition_updates.items()
if u.tgt_size)
assert block_diffs_with_target == target_dynamic_partitions, \
"Target Dynamic partitions: {}, BlockDifference with target: {}".format(
list(target_dynamic_partitions), list(block_diffs_with_target))
source_dynamic_partitions = set(shlex.split(source_info_dict.get(
"dynamic_partition_list", "").strip()))
block_diffs_with_source = set(p for p, u in self._partition_updates.items()
if u.src_size)
assert block_diffs_with_source == source_dynamic_partitions, \
"Source Dynamic partitions: {}, BlockDifference with source: {}".format(
list(source_dynamic_partitions), list(block_diffs_with_source))
if self._partition_updates:
logger.info("Updating dynamic partitions %s",
self._partition_updates.keys())
self._group_updates = collections.OrderedDict()
for g in tgt_groups:
self._group_updates[g] = DynamicGroupUpdate()
self._group_updates[g].tgt_size = int(info_dict.get(
"super_%s_group_size" % g, "0").strip())
for g in src_groups:
if g not in self._group_updates:
self._group_updates[g] = DynamicGroupUpdate()
self._group_updates[g].src_size = int(source_info_dict.get(
"super_%s_group_size" % g, "0").strip())
self._Compute()
def WriteScript(self, script, output_zip, write_verify_script=False):
script.Comment('--- Start patching dynamic partitions ---')
for p, u in self._partition_updates.items():
if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
script.Comment('Patch partition %s' % p)
u.block_difference.WriteScript(script, output_zip, progress=u.progress,
write_verify_script=False)
op_list_path = MakeTempFile()
with open(op_list_path, 'w') as f:
for line in self._op_list:
f.write('{}\n'.format(line))
ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
script.Comment('Update dynamic partition metadata')
script.AppendExtra('assert(update_dynamic_partitions('
'package_extract_file("dynamic_partitions_op_list")));')
if write_verify_script:
for p, u in self._partition_updates.items():
if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
u.block_difference.WritePostInstallVerifyScript(script)
script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
for p, u in self._partition_updates.items():
if u.tgt_size and u.src_size <= u.tgt_size:
script.Comment('Patch partition %s' % p)
u.block_difference.WriteScript(script, output_zip, progress=u.progress,
write_verify_script=write_verify_script)
if write_verify_script:
script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
script.Comment('--- End patching dynamic partitions ---')
def _Compute(self):
self._op_list = list()
def append(line):
self._op_list.append(line)
def comment(line):
self._op_list.append("# %s" % line)
if self._build_without_vendor:
comment('System-only build, keep original vendor partition')
# When building without vendor, we do not want to override
# any partition already existing. In this case, we can only
# resize, but not remove / create / re-create any other
# partition.
for p, u in self._partition_updates.items():
comment('Resize partition %s to %s' % (p, u.tgt_size))
append('resize %s %s' % (p, u.tgt_size))
return
if self._remove_all_before_apply:
comment('Remove all existing dynamic partitions and groups before '
'applying full OTA')
append('remove_all_groups')
for p, u in self._partition_updates.items():
if u.src_group and not u.tgt_group:
append('remove %s' % p)
for p, u in self._partition_updates.items():
if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
comment('Move partition %s from %s to default' % (p, u.src_group))
append('move %s default' % p)
for p, u in self._partition_updates.items():
if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
comment('Shrink partition %s from %d to %d' %
(p, u.src_size, u.tgt_size))
append('resize %s %s' % (p, u.tgt_size))
for g, u in self._group_updates.items():
if u.src_size is not None and u.tgt_size is None:
append('remove_group %s' % g)
if (u.src_size is not None and u.tgt_size is not None and
u.src_size > u.tgt_size):
comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
append('resize_group %s %d' % (g, u.tgt_size))
for g, u in self._group_updates.items():
if u.src_size is None and u.tgt_size is not None:
comment('Add group %s with maximum size %d' % (g, u.tgt_size))
append('add_group %s %d' % (g, u.tgt_size))
if (u.src_size is not None and u.tgt_size is not None and
u.src_size < u.tgt_size):
comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
append('resize_group %s %d' % (g, u.tgt_size))
for p, u in self._partition_updates.items():
if u.tgt_group and not u.src_group:
comment('Add partition %s to group %s' % (p, u.tgt_group))
append('add %s %s' % (p, u.tgt_group))
for p, u in self._partition_updates.items():
if u.tgt_size and u.src_size < u.tgt_size:
comment('Grow partition %s from %d to %d' % (p, u.src_size, u.tgt_size))
append('resize %s %d' % (p, u.tgt_size))
for p, u in self._partition_updates.items():
if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
comment('Move partition %s from default to %s' %
(p, u.tgt_group))
append('move %s %s' % (p, u.tgt_group))
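# Illustrative sketch (assumption, not part of the original file): for an
# incremental OTA that only grows partition "system" inside group "group_foo",
# _Compute() would emit a dynamic_partitions_op_list along the lines of:
#
#   # Grow group group_foo from 4294967296 to 8589934592
#   resize_group group_foo 8589934592
#   # Grow partition system from 3221225472 to 4294967296
#   resize system 4294967296
#
# WriteScript() then packages this list into the OTA and has the updater apply
# it via update_dynamic_partitions(package_extract_file("dynamic_partitions_op_list")).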
|
[
"[email protected]"
] | |
ca64a8620097e64cd0e3f07acf0fdbadf7c62a6a
|
458eafbb2763c8d8056312fe8254fc1b43ce2cf7
|
/CloudShell.py
|
d7ad15aa3a5c0890036221ac7c507001da4be917
|
[] |
no_license
|
hxtin001/SeleniumPython
|
f9bbe052462dfe8a4b68bee5056fad23cfae7953
|
38450ec0eaef771756dd837d4a0b76cbb783d8ca
|
refs/heads/master
| 2020-03-17T20:29:33.500266 | 2018-06-07T03:56:52 | 2018-06-07T03:56:52 | 133,912,648 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,416 |
py
|
# -*- coding: utf-8 -*-
import time
from selenium import webdriver
# from pyvirtualdisplay import Display
import logging.handlers
from selenium.webdriver.common.keys import Keys
import constants
import json
# Headless virtual display (pyvirtualdisplay) support, currently disabled
# display = Display(visible=True, size=(constants.screen_width_default, constants.screen_height_default))
# display.start()
# Logging configuration
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
log_handle = logging.handlers.TimedRotatingFileHandler(constants.path_application_log, when='midnight')
# log_handle = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s] - [%(filename)s:%(lineno)s]- %(levelname)s - %(message)s')
log_handle.setFormatter(formatter)
logger.addHandler(log_handle)
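# The local "constants" module is not shown in this file; the attributes it
# must expose, judging from the references above, are path_application_log,
# screen_width_default and screen_height_default. A minimal sketch (the values
# are illustrative guesses only):
#
#   # constants.py
#   path_application_log = 'cloudshell.log'
#   screen_width_default = 1366
#   screen_height_default = 768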
class CloudShell:
driver = None
config = None
cases = []
def __init__(self):
logger.info('----------------------------START--------------------------------')
self.load_data_json()
try:
self.driver = webdriver.Chrome(self.config.get('DRIVER_PATH'))
self.driver.get(self.config.get('WEB_URL'))
self.driver.implicitly_wait(100)
logger.info("Connected to {}".format(self.config.get('DRIVER_PATH')))
except Exception as e:
            logger.error('Cannot connect to website. Error: {}.'.format(e))
raise e
def load_data_json(self, fileName='data.json'):
try:
with open(fileName) as json_data:
data = json.load(json_data)
self.config = data.get("CONFIG")
self.cases = data.get("CASE")
except Exception as e:
logger.error('Cannot load data from json. Error: {}.'.format(e))
raise e
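    # Expected shape of data.json, inferred from the keys read above and in
    # run(); the concrete values below are illustrative assumptions only:
    #
    #   {
    #     "CONFIG": {"DRIVER_PATH": "./chromedriver", "WEB_URL": "https://shell.cloud.google.com"},
    #     "CASE": [
    #       {"USERNAME": "[email protected]", "PASSWORD": "secret", "COMMAND": "uname -a"}
    #     ]
    #   }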
def run(self):
try:
for case in self.cases:
# Send username
usernameEle = self.driver.find_element_by_css_selector("input[type='email']")
usernameEle.send_keys(case.get('USERNAME'))
# Click next button
self.driver.find_element_by_css_selector("div#identifierNext span.RveJvd.snByac").click()
time.sleep(2)
                # Send password
passEle = self.driver.find_element_by_css_selector("input[type='password']")
passEle.send_keys(case.get('PASSWORD'))
# Click next button to login
self.driver.find_element_by_css_selector("div#passwordNext span.RveJvd.snByac").click()
time.sleep(20)
                # Switch to the frame that contains the terminal
frame = self.driver.find_element_by_css_selector("div.p6n-devshell-background.layout-column iframe.p6n-devshell-frame.layout-fill")
self.driver.switch_to.frame(frame)
                # Switch to the nested terminal frame
terminalFrame = self.driver.find_element_by_css_selector("body.dark-theme.iframed div#terminal iframe")
self.driver.switch_to.frame(terminalFrame)
# Send script
terminal = self.driver.find_element_by_css_selector("textarea[tabindex='-1']")
terminal.send_keys(case.get("COMMAND"))
terminal.send_keys(Keys.RETURN)
except Exception as e:
            logger.error('Step 1 error: {}'.format(e))
raise e
def _main():
CloudShell().run()
if __name__ == '__main__':
_main()
|
[
"[email protected]"
] |