| column | type |
| --- | --- |
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 5–283) |
| content_id | string (length 40) |
| detected_licenses | sequence (length 0–41) |
| license_type | string (2 classes) |
| repo_name | string (length 7–96) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (58 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (12.7k–662M, nullable) |
| star_events_count | int64 (0–35.5k) |
| fork_events_count | int64 (0–20.6k) |
| gha_license_id | string (11 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (43 classes) |
| src_encoding | string (9 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (7–5.88M) |
| extension | string (30 classes) |
| content | string (length 7–5.88M) |
| authors | sequence (length 1) |
| author | string (length 0–73) |
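A minimal sketch of how rows with these columns could be consumed with the Hugging Face `datasets` library. The dataset identifier and split below are placeholders, not taken from this page; the filter only assumes the column names listed above.

from datasets import load_dataset

# Placeholder dataset id; stream so the large `content` column is not downloaded all at once.
ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)

# Keep permissively licensed rows that are neither vendored nor generated.
kept = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

for row in kept.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])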
13b70f9f71a2f2f8d0002dafe8463212833e9087 | e600c278e9ec62f5475602d37e3607ce4a8bf112 | /VIB/CheckBraAnnot/Trapit2BingoConverter.py | 5359882bcf119dfbae1735b463c0bd35f6780896 | [] | no_license | blad00/PythonLearning | 4f949f6768908cd81619fe7f4b7262118185c4fc | 1dab7fb710da42fb123af2e9fc36a05c6e2c7d4f | refs/heads/master | 2023-01-29T17:25:41.565452 | 2023-01-03T13:22:32 | 2023-01-03T13:22:32 | 184,554,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py |
#file = open("/home/dacru/Midas/research/deepseq/ngsproject_brassica/Annotation2020TRAPID/Results/transcripts_go_exp1086.txt", "r")
#with open("/home/dacru/Midas/research/deepseq/ngsproject_brassica/Annotation2020TRAPID/Results/transcripts_go_exp1105Exome.txt", "r") as file,\
# open("/home/dacru/Midas/research/deepseq/ngsproject_brassica/Annotation2020TRAPID/Results/BrassicaNapusAnnotExome.txt", "w") as outfile:
with open("D:\DanielVIB\Brassica\Annotation2020TRAPID\Results\OrgDown\\transcripts_go_exp1285ExomePlaza4.5.txt", "r") as file,\
open("D:\DanielVIB\Brassica\Annotation2020TRAPID\Results\BrassicaNapusAnnotExomePlaza4.5.txt", "w") as outfile:
next(file)
outfile.write("(species=Brassica)(type=Biological Process)(curator=GO)"+"\n")
for line in file:
fields = line.split("\t")
gene = fields[1]
annot = fields[2].split(":")[1]
outfile.write(gene + " = " + annot+"\n")
| [
"[email protected]"
] | |
cb89f97e3354bae2099d93c24041c615d0d35c80 | 358a80fad5c9a5eb595b6f1ef873ed5d9d350674 | /starfish/image/__init__.py | ed674c4c205440cb16d99ce07f8f85acd3899de0 | [
"MIT"
] | permissive | ttung/starfish | ed1bad0a9c93f2136839f0ebd7f754f899264839 | 1bd8abf55a335620e4b20abb041f478334714081 | refs/heads/master | 2020-05-07T16:25:15.340459 | 2019-04-10T20:52:30 | 2019-04-10T20:52:30 | 175,312,617 | 0 | 0 | MIT | 2019-03-15T19:24:51 | 2019-03-12T23:39:55 | Jupyter Notebook | UTF-8 | Python | false | false | 186 | py | from ._filter import Filter
from ._registration._apply_transform import ApplyTransform
from ._registration._learn_transform import LearnTransform
from ._segmentation import Segmentation
| [
"[email protected]"
] | |
49793642528b5968b3dd7895ec38391e46c8a57b | 6a99547f767b942e2b51b79da0f23a990f3105d3 | /catkin_ws/build/catkin_generated/generate_cached_setup.py | f6dd267e02054db58d28948a3505e28d1e937a94 | [] | no_license | andokeisuke/NHK2020 | 588a1c0070bacaa98f10229252b40eb34c647345 | d6cb3f0c192141e9d87f4faaf7d1d4537ede4a3e | refs/heads/master | 2020-07-11T20:19:29.539799 | 2019-11-22T18:48:04 | 2019-11-22T18:48:04 | 204,635,311 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/ando/catkin_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/ando/catkin_ws/devel/env.sh')
output_filename = '/home/ando/catkin_ws/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
8ed788df0e150819da4a7ae761efb59be4dc25ee | 4ddd19afb6de8fc6db41049d65a690763f81e301 | /interger.py | 37150b4a70debfb3714cf27af4d0a1d52527eb44 | [] | no_license | xjl319/demo | c3a41bd74faaac973a79069faaed158547605c49 | cc7bb8774e56973df52fee8eafb965c758c34208 | refs/heads/master | 2021-05-19T05:20:26.884065 | 2020-04-01T09:18:53 | 2020-04-01T09:18:53 | 251,544,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | #!/usr/bin/env python3.4
days = int(input("Enter days:"))
#months = days // 30
#days = days % 30
print("Months = {} Days = {}".format(*divmod(days, 30)))
| [
"[email protected]"
] | |
d0a19ab81b79d8d1ce7e5f352ffa3d11c792ded9 | 6a2d5e74822cb08c61d5953f5e77d479c756a6c2 | /source/gui/login.py | cf4b20869a40534639b51e27cc2046bc5124a017 | [] | no_license | wang-rq/BlockChainFinal | d24bb5b967ee55c8712a01b5b01dba196b962fb1 | e035c062978cc1cf337b7d9a0746274f53e2c0de | refs/heads/master | 2023-02-21T22:01:19.407739 | 2021-01-24T10:53:40 | 2021-01-24T10:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,661 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'login.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Login(object):
def setupUi(self, Login):
Login.setObjectName("Login")
Login.resize(687, 450)
self.centralwidget = QtWidgets.QWidget(Login)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(140, 90, 391, 30))
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(220, 150, 215, 81))
self.layoutWidget.setObjectName("layoutWidget")
self.formLayout = QtWidgets.QFormLayout(self.layoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.line_name = QtWidgets.QLineEdit(self.layoutWidget)
self.line_name.setObjectName("line_name")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.line_name)
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.line_pwd = QtWidgets.QLineEdit(self.layoutWidget)
self.line_pwd.setObjectName("line_pwd")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.line_pwd)
self.label_2 = QtWidgets.QLabel(self.layoutWidget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.layoutWidget1 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget1.setGeometry(QtCore.QRect(190, 330, 282, 50))
self.layoutWidget1.setObjectName("layoutWidget1")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget1)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(20)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btn_login = QtWidgets.QPushButton(self.layoutWidget1)
self.btn_login.setObjectName("btn_login")
self.horizontalLayout.addWidget(self.btn_login)
self.btn_quit = QtWidgets.QPushButton(self.layoutWidget1)
self.btn_quit.setObjectName("btn_quit")
self.horizontalLayout.addWidget(self.btn_quit)
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(296, 40, 81, 20))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(160, 120, 391, 17))
self.label_5.setObjectName("label_5")
self.btn_signup = QtWidgets.QPushButton(self.centralwidget)
self.btn_signup.setGeometry(QtCore.QRect(340, 270, 80, 25))
self.btn_signup.setObjectName("btn_signup")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(210, 270, 141, 17))
self.label_6.setObjectName("label_6")
Login.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Login)
self.menubar.setGeometry(QtCore.QRect(0, 0, 687, 28))
self.menubar.setObjectName("menubar")
Login.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Login)
self.statusbar.setObjectName("statusbar")
Login.setStatusBar(self.statusbar)
self.retranslateUi(Login)
self.btn_quit.clicked.connect(Login.close)
QtCore.QMetaObject.connectSlotsByName(Login)
def retranslateUi(self, Login):
_translate = QtCore.QCoreApplication.translate
Login.setWindowTitle(_translate("Login", "MainWindow"))
self.label.setText(_translate("Login", "欢迎来到供应链金融平台"))
self.label_3.setText(_translate("Login", "密码"))
self.label_2.setText(_translate("Login", "公司名字"))
self.btn_login.setText(_translate("Login", "确认"))
self.btn_quit.setText(_translate("Login", "退出"))
self.label_4.setText(_translate("Login", "Welcome!"))
self.label_5.setText(_translate("Login", "请输入公司名和密码进行登陆或者进行新用户注册。"))
self.btn_signup.setText(_translate("Login", "注册"))
self.label_6.setText(_translate("Login", "新公司注册:"))
| [
"[email protected]"
] | |
8adbcadcb90863f8f8a2a7e3d65a231bb7f75ba6 | 2ebbcaf1709ce4ae86cffae68665ec64f30db85e | /main.py | bab99effe881734c343ea2f1fbec2e1d2cef5f83 | [] | no_license | br34th3r/PythonMontyHall | fd42a317ee308f5331c9ac9fd45367574d484346 | aba88938e6461bbec3b46940e0f968e1325a8c7a | refs/heads/master | 2020-03-29T17:02:20.934713 | 2018-09-24T17:19:50 | 2018-09-24T17:19:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import random
class MontyHallBot:
doors = [False, False, False]
wins = 0
losses = 0
repeats = 10000
def __init__(self):
self.runTest()
def runTest(self):
for i in range(self.repeats):
self.assignDoors()
self.userSelection()
self.openAnotherDoor()
self.switchUserPosition()
self.winOrLose()
print("WINS: " + str(self.wins))
print("LOSSES: " + str(self.losses))
print((self.wins/self.repeats)*100)
def assignDoors(self):
self.doors = ["Car", "Goat", "Goat"]
random.shuffle(self.doors)
def userSelection(self):
self.choice = random.randrange(3)
def openAnotherDoor(self):
for j, contents in enumerate(self.doors):
if(j != self.choice and contents == "Goat"):
self.reveal = j
break
def switchUserPosition(self):
for j, contents in enumerate(self.doors):
if(j != self.choice and j != self.reveal):
self.choice = j
break
def winOrLose(self):
if(self.doors[self.choice] == "Car"):
self.wins += 1
else:
self.losses += 1
bot = MontyHallBot()
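# Added note: with the always-switch strategy simulated above, the win rate
# converges to roughly 2/3 of the trials, so the final percentage printed is
# expected to be close to 66.6.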
| [
"[email protected]"
] | |
4fa5d282ceb195b2283b81d53b0e422b13a2f9cc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02754/s361197897.py | 2a689fe5085ad480df42a85c9af52f7a8b396057 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python3
# Generated by https://github.com/kyuridenamida/atcoder-tools
from typing import *
import collections
import functools
import itertools
import math
import sys
INF = float('inf')
def solve(N: int, A: int, B: int):
return N//(A+B) * A + min(N % (A+B), A)
def main():
sys.setrecursionlimit(10 ** 6)
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
A = int(next(tokens)) # type: int
B = int(next(tokens)) # type: int
print(f'{solve(N, A, B)}')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0f076b7a411bd5a645dbaee38e82b1a811b60a0f | 7abbb318d16f6c909ee894709f4fbd2cb24cd44e | /Python/Trie/208_Implement_Trie_Prefix_Tree.py | 0407e21f673886a6c776987624de9fe70a578cb7 | [] | no_license | GuilinXie/LeetcodePython | c7c9267d8e64017d40cc4f9ae0fba8e1217875e9 | 4416e6e2b6eb84fb87b081a3b857cfe5a57cf46c | refs/heads/master | 2021-08-07T09:36:13.805208 | 2021-07-29T17:38:58 | 2021-07-29T17:38:58 | 215,821,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = dict()
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
p = self.root
for c in word:
if c not in p:
p[c] = dict()
p = p[c]
p["#"] = "#"
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
p = self.root
for c in word:
if c not in p:
return False
p = p[c]
if "#" not in p:
return False
return True
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
p = self.root
for c in prefix:
if c not in p:
return False
p = p[c]
return True
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix) | [
"[email protected]"
] | |
0b63d785f0735626b434865eefa30bf505b473c8 | 9b2f4810b093639209b65bbcb5fa07125e17266f | /old_tests/radical.components.py | ad797f0e831b856ff2c486a5970d3dffa9d70687 | [
"MIT"
] | permissive | karahbit/radical.pilot | 887d25d370d08e3455f19cd240677b62278ef67f | c611e1df781749deef899dcf5815728e1d8a962e | refs/heads/devel | 2020-12-21T09:54:10.622036 | 2020-08-20T18:18:12 | 2020-08-20T18:18:12 | 254,967,331 | 0 | 0 | NOASSERTION | 2020-05-01T00:47:51 | 2020-04-11T22:37:20 | null | UTF-8 | Python | false | false | 2,239 | py |
import os
import sys
import time
import radical.utils as ru
import radical.pilot as rp
import radical.pilot.utils as rpu
import radical.pilot.constants as rpc
# ------------------------------------------------------------------------------
#
class CompB(rpu.Component):
def __init__(self, session):
self._uid = 'comp_b'
self._msg = None
self._session = session
rpu.Component.__init__(self, session._cfg, self._session)
def initialize_child(self):
self.register_subscriber(rpc.CONTROL_PUBSUB, self.control_cb_1)
def finalize_child(self):
print "got %s" % self._msg
def control_cb_1(self, topic, msg):
self._msg = msg
# ------------------------------------------------------------------------------
#
class CompA(rpu.Component):
def __init__(self, session):
self._idx = 0
self._uid = 'comp_a'
self._msg = None
self._session = session
rpu.Component.__init__(self, self._session._cfg, self._session)
def initialize_child(self):
self.register_timed_cb(self.idle_cb, timer=0.2)
# self.register_publisher(self.idle_cb, timeout=2.0)
def finalize_child(self):
print 'sent %s' % self._msg
def idle_cb(self):
msg = {'cmd' : 'count',
'arg' : {'idx' : self._idx}}
self.publish(rpc.CONTROL_PUBSUB, msg)
self._idx += 1
self._msg = msg
# ------------------------------------------------------------------------------
#
def test():
s = None
try:
cfg = ru.read_json("%s/session.json" % os.path.dirname(__file__))
dh = ru.DebugHelper()
s = rp.Session(cfg=cfg)
ca1 = CompA(s)
cb1 = CompB(s)
cb2 = CompB(s)
ca1.start()
cb1.start()
cb2.start()
# s._controller.add_things([ca1, cb1, cb2])
time.sleep(3)
finally:
if s:
print 'close'
s.close()
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':
test()
# ------------------------------------------------------------------------------
| [
"[email protected]"
] | |
3927283b112cc9cc89e9df31803498a885d27269 | 3cb2408400525c63cf677be0b79910218142a0d1 | /Advanced level/Advanced OOP/01.Defining Classes - Lab +Ex/05. Cup.py | 0d2ebdb5da8937bf6b8cf2ac80e9685d6fc8cc03 | [] | no_license | StefanDimitrovDimitrov/Python_Advanced | 5eed83ba41a4726e8568054a637837db9f859323 | 87c6c13414cb92703bee33650a341d1a369b409b | refs/heads/main | 2023-08-06T06:27:41.594009 | 2021-10-05T18:08:40 | 2021-10-05T18:08:40 | 312,846,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | class Cup():
def __init__(self, size, quantity):
self.size = size
self.quantity = quantity
def fill(self, milliliters):
current_size = self.size - self.quantity
if milliliters <= current_size:
self.quantity += milliliters
    def status(self):
return self.size - self.quantity
cup = Cup(100, 10)
cup.fill(50)
cup.fill(10)
print(cup.status())
| [
"[email protected]"
] | |
593277ecce8975c15e15ebb55a3b2136ec1dd1fe | edfe02ab1e45708ad09a86ef85d1b53b065de1fe | /sixthStudy/Chapter 14/WindowSize.py | c9eaf7dd55a06ca2eff73142471a964e9973ae07 | [] | no_license | kor-Chipmunk/PythonEducation | d88e490a2d042919dafaff0463dad63611947800 | d98e230646d263e8d9ca3480467f785ff2beda99 | refs/heads/master | 2021-01-11T17:07:21.857179 | 2017-01-30T04:24:10 | 2017-01-30T04:24:10 | 79,725,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, parent=None, title="Window Size")
self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLButtonDown)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnMouseRButtonDown)
def OnMouseLButtonDown(self, event):
        self.SetSize(wx.Size(400, 200))
def OnMouseRButtonDown(self, event):
        self.SetSize(wx.Size(200, 400))
if __name__ == '__main__':
app = wx.App()
frame = MyFrame()
frame.Show()
app.MainLoop() | [
"[email protected]"
] | |
55dcf2dc3c7d80abf361e7272dc72e4b521a1db6 | cf69178db43fb62dfaa0781055048339b14f5bc1 | /mix.py | 75421d4283a330fe1aaaa53a40493ca8e9c4a7bf | [] | no_license | dachmiri/secure_communication | 1975983bb10534b692cf2d25a1bc9d350cec52b8 | 85112b5b5406d119c1eeb51d99ec30c22afda2e0 | refs/heads/master | 2023-06-04T23:43:23.023006 | 2021-06-28T11:03:36 | 2021-06-28T11:03:36 | 379,969,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,426 | py | # Maria Dach, 208539080, Shira Negbi, 313236911
import socket
import sys
import random
import threading
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
# a server that listens to messages, shuffles their order, decrypts them and forwards every 60 seconds
class MixServer:
def __init__(self, file_number):
self.file_number = file_number
self.file = None
self.key = None
self.fernet = None
self.received_messages = []
self.ip = None
self.port = -1
self.buffer_size = 0
self.messages_lock = threading.Lock()
self.load_key()
self.init_tcp_params()
# open and read the proper skY.pem file where Y is an int argument
def read_file(self):
file_name = "sk" + str(self.file_number) + ".pem"
file = open(file_name, "r")
self.file = file
return file.read()
# close the skY.pem file
def close_file(self):
self.file.close()
# load the cryptographic key out of the file
def load_key(self):
key_text = self.read_file()
self.key = load_pem_private_key(key_text.encode(), None)
self.close_file()
# initialize the tcp parameters by the ips file
def init_tcp_params(self):
file = open("ips.txt", "r")
lines = file.readlines()
# pick the proper line by the number of the server
line = lines[int(self.file_number) - 1]
num_servers = len(lines)
self.buffer_size = max(8192, 1024 * pow(2, num_servers))
ip, port = line.split(" ")
self.ip = ip
self.port = int(port)
# listen to tcp messages by the given parameters
def receive_messages(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", self.port))
s.listen()
while True:
# add each received message to the list
conn, addr = s.accept()
msg = conn.recv(self.buffer_size)
conn.close()
lock_aquired = self.messages_lock.acquire(False)
while not lock_aquired:
lock_aquired = self.messages_lock.acquire(False)
self.received_messages.append(msg)
self.messages_lock.release()
# forward a message to its destination
def forward_message(self, dst_ip, dst_port, msg):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((dst_ip, dst_port))
s.send(msg)
s.close()
# decrypt a message by the cryptographic key
def decrypt_message(self, msg):
text = self.key.decrypt(
msg,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return text
# decrypt a message and separate the ip and the port from the rest
def decrypt_message_ip_port(self, msg):
decrypted_msg = self.decrypt_message(msg)
dst_ip = decrypted_msg[:4]
dst_ip = ".".join([str(x) for x in dst_ip])
dst_port = decrypted_msg[4:6]
dst_port = int.from_bytes(dst_port,'big')
text = decrypted_msg[6:]
return dst_ip, dst_port, text
# shuffle the messages received so far, decrypt and forward them
def forward_all_messages(self):
lock_aquired = self.messages_lock.acquire(True)
while not lock_aquired:
lock_aquired = self.messages_lock.acquire(True)
messages = self.received_messages[:]
self.received_messages = []
self.messages_lock.release()
random.shuffle(messages)
for msg in messages:
# decrypt each message and send the result to the ip and port appended to its beginning
dst_ip, dst_port, m = self.decrypt_message_ip_port(msg)
self.forward_message(dst_ip, dst_port, m)
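# --- Illustrative helper (added; not part of the original assignment code) ---
# Sketch of how a sender could build one encryption layer in the exact format
# decrypt_message_ip_port() expects: 4 bytes of IP, 2 bytes of port (big endian),
# then the payload. The public-key bytes passed in are an assumption (e.g. read
# from a pkY.pem file, name hypothetical); the RSA/OAEP parameters mirror the
# decryption performed by the server above.
from cryptography.hazmat.primitives.serialization import load_pem_public_key
def build_mix_layer(public_key_pem, dst_ip, dst_port, payload):
    # encrypt "IP + port + payload" with the mix server's RSA public key
    public_key = load_pem_public_key(public_key_pem)
    plaintext = socket.inet_aton(dst_ip) + dst_port.to_bytes(2, 'big') + payload
    return public_key.encrypt(
        plaintext,
        padding.OAEP(
            mgf=padding.MGF1(algorithm=hashes.SHA256()),
            algorithm=hashes.SHA256(),
            label=None
        )
    )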
def main():
# start a mix server with its number
mix_server = MixServer(sys.argv[1])
# receive and send messages simultaneously
threading.Thread(target=mix_server.receive_messages).start()
while True:
# wait 60 seconds before sending the next group of messages
t = threading.Timer(60, mix_server.forward_all_messages)
t.start()
t.join()
main()
| [
"[email protected]"
] | |
8f940996dd860a113acc4f1d3bfacce29c18292e | 99b32c74b5cc390374063526061a734fff63d985 | /code/kr2krx.py | 012b56fbbbf5b7e262ab21bbf6797335a417960e | [] | no_license | krptest/newkrp | 75d8d8ea92d7d9106db142792a5d6bcfa2e60925 | 00e669205b7f2e5b38de54f503d321ff32efcc6b | refs/heads/master | 2023-06-08T07:20:11.431147 | 2023-06-02T00:35:00 | 2023-06-02T00:35:00 | 220,918,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,017 | py | # -*- coding: utf-8 -*-
# KR to TEI format.
#
import re, os, sys, requests, datetime
from github import Github
from dotenv import load_dotenv
from collections import defaultdict
load_dotenv()
puamagic = 1069056
krx = "KR"
if os.path.exists('../../.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
at=os.environ.get('at')
lang="zho"
# template for xml
# <?xml-model href="http://www.tei-c.org/release/xml/tei/custom/schema/relaxng/tei_all.rng" type="application/xml" schematypens="http://relaxng.org/ns/structure/1.0"?>
#<?xml-model href="http://www.tei-c.org/release/xml/tei/custom/schema/relaxng/tei_all.rng" type="application/xml"
# schematypens="http://purl.oclc.org/dsdl/schematron"?>
# need the following vars:
# user, txtid, title, date, branch, today, body, lang
# body should contain the preformatted content for the body element
tei_template="""<?xml version="1.0" encoding="UTF-8"?>
<TEI xmlns="http://www.tei-c.org/ns/1.0" xml:id="{txtid}_{branch}">
<teiHeader>
<fileDesc>
<titleStmt>
<title>{title}</title>
</titleStmt>
<publicationStmt>
<p>Published by @kanripo on GitHub.com</p>
</publicationStmt>
<sourceDesc>
<p>{branch}</p>
</sourceDesc>
</fileDesc>
<encodingDesc>
<variantEncoding location="external" method="double-end-point"/>
</encodingDesc>
<profileDesc>
<textClass>
<catRef scheme="#kr-categories" target="#{cat}"/>
</textClass>
</profileDesc>
<revisionDesc>
<change resp="#chris"><p>Converted to TEI format for TLS on <date>{today}</date>.</p></change>
</revisionDesc>
</teiHeader>
{sd}
</TEI>
"""
def get_property(p_in):
p = p_in[2:]
pp = p.split(": ")
if pp[0] in ["DATE", "TITLE"]:
return (pp[0], pp[1])
elif pp[0] == "PROPERTY":
p1 = pp[1].split()
return (p1[0], " ".join(p1[1:]))
return "Bad property: %s" % (p_in)
# loop through the lines and return a dictionary of metadata and text content
# gjd is the dictionary to hold gaiji encountered, md is whether we want to care about <md: style tags.
# here we parse the text into paragraphs, instead of surface elements
def parse_text_to_p(lines, gjd, md=False):
lx={'TEXT' : []}
lcnt=0
nl=[]
np=[]
pbdict = defaultdict(int)
pbxmlid=""
for l in lines:
l=re.sub(r"([<]+)([^lmp])", "\\2", l)
l=re.sub(r"([<]+)$", "", l)
l=re.sub("¶", "<lb/>", l)
lcnt += 1
if l.startswith("#+"):
p = get_property(l)
lx[p[0]] = p[1]
continue
elif l.startswith("#"):
continue
elif "<pb:" in l:
pbxmlid=re.sub("<pb:([^_]+)_([^_]+)_([^>]+)>", "\\1_\\2_\\3", l)
if (pbxmlid in pbdict):
continue
else:
pbdict[pbxmlid] += 1
l=re.sub("<pb:([^_]+)_([^_]+)_([^>]+)>", "<pb ed='\\2' n='\\3' xml:id='\\1_\\2_\\3'/>", l)
lcnt = 0
if "<md:" in l:
l=re.sub("<md:([^_]+)_([^_]+)_([^>]+)>", "", l)
if "&KR" in l:
# only for the sideeffect
re.sub("&KR([^;]+);", lambda x : gjd.update({"KR%s" % (x.group(1)) : "%c" % (int(x.group(1)) + puamagic)}), l)
l = re.sub("&KR([^;]+);", lambda x : "%c" % (int(x.group(1)) + puamagic ), l)
# if md:
# pass
# #l=re.sub("¶", f"<!-- ¶ -->", l)
# else:
l = l.replace("(", "<note>")
l = l.replace(")", "</note>")
if not re.match("^</p>", l) and len(l) > 0:
l="%s\n" % (l)
if l == "":
np.append(nl)
nl=[]
else:
if md:
l=l+"\n"
nl.append(l)
np.append(nl)
lx['TEXT'] = np
return lx
# loop through the lines and return a dictionary of metadata and text content
# gjd is the dictionary to hold gaiji encountered, md is whether we want to care about <md: style tags.
#
def parse_text(lines, gjd, md=False):
lx={'TEXT' : []}
lcnt=0
nl=[]
np=[]
pbxmlid=""
for l in lines:
l=re.sub(r"([<]+)([^lmp])", "\\2", l)
l=re.sub(r"([<]+)$", "", l)
l=re.sub("¶", "", l)
lcnt += 1
if l.startswith("#+"):
p = get_property(l)
lx[p[0]] = p[1]
continue
elif l.startswith("#"):
continue
elif "<pb:" in l:
np.append(nl)
nl=[]
pbxmlid=re.sub("<pb:([^_]+)_([^_]+)_([^>]+)>", "\\1_\\2_\\3", l)
l=re.sub("<pb:([^_]+)_([^_]+)_([^>]+)>", "</surface>\n<surface xml:id='\\1_\\2_\\3-z'>\n<pb ed='\\2' n='\\3' xml:id='\\1_\\2_\\3'/>", l)
# l=re.sub("<pb:([^_]+)_([^_]+)_([^>]+)>", "</div></div><div type='p' n='\\3'><div type='l' n='x'>", l)
lcnt = 0
if "<md:" in l:
l=re.sub("<md:([^_]+)_([^_]+)_([^>]+)>", "<!-- md: \\1-\\2-\\3-->", l)
#l = re.sub("&([^;]+);", "<g ref='#\\1'/>", l)
if "&KR" in l:
# only for the sideeffect
re.sub("&KR([^;]+);", lambda x : gjd.update({"KR%s" % (x.group(1)) : "%c" % (int(x.group(1)) + puamagic)}), l)
l = re.sub("&KR([^;]+);", lambda x : "%c" % (int(x.group(1)) + puamagic ), l)
# if md:
# pass
# #l=re.sub("¶", f"<!-- ¶ -->", l)
# else:
l = l.replace("(", "<note>")
l = l.replace(")", "</note>")
if not re.match("^</surface>", l) and len(l) > 0:
l="<line xml:id='%s.%2.2d'>%s</line>\n" % (pbxmlid, lcnt,l)
#l=re.sub("¶", f"\n<lb n='{lcnt}'/>", l)
# if l == "":
# np.append(nl)
# nl=[]
# else:
# if md:
# l=l+"\n"
l = l.replace("KR", krx)
nl.append(l)
np.append(nl)
lx['TEXT'] = np
return lx
def save_text_part(lx, txtid, branch, path):
path = path.replace("KR", krx)
ntxtid = txtid.replace("KR", krx)
if re.match("^[A-Z-]+$", branch):
bt = "/doc/"
else:
bt = "/int/"
try:
os.makedirs(ntxtid + bt + branch)
except:
pass
fname = "%s%s%s/%s.xml" % (ntxtid, bt, branch, path[:-4])
of=open(fname, "w")
localid=path[:-4].split("_")
localid.insert(1, branch)
lid = "_".join(localid)
lid=lid.replace("KR", krx)
if bt == "/int/":
of.write("<div xmlns='http://www.tei-c.org/ns/1.0'><p xml:id='%s'>" % (lid))
else:
of.write("<surfaceGrp xmlns='http://www.tei-c.org/ns/1.0' xml:id='%s'>\n<surface type='dummy'>" % (lid))
for page in lx["TEXT"]:
for line in page:
line = line.replace("KR", krx)
of.write(line)
if bt == "/int/":
of.write("</p></div>\n")
else:
of.write("</surface>\n</surfaceGrp>\n")
def save_gjd (txtid, branch, gjd, type="entity"):
if (type=="entity"):
fname = "%s/aux/map/%s_%s-entity-map.xml" % (txtid, txtid, branch)
else:
fname = "%s/aux/map/%s_%s-entity-g.xml" % (txtid, txtid, branch)
of=open(fname, "w")
of.write("""<?xml version="1.0" encoding="UTF-8"?>
<stylesheet xmlns="http://www.w3.org/1999/XSL/Transform" version="2.0">
<character-map name="krx-map">\n""")
k = [a for a in gjd.keys()]
k.sort()
for kr in k:
if (type=="entity"):
of.write("""<output-character character="%s" string="&%s;"/>\n""" % (gjd[kr], kr))
else:
of.write("""<output-character character="%s" string="<g ref="%s"/>"/>\n""" % (gjd[kr], kr))
of.write("""</character-map>\n</stylesheet>\n""")
of.close()
def convert_text(txtid, user='kanripo'):
gh=Github(at)
hs=gh.get_repo(f"{user}/{txtid}")
#get the branches
branches=[a.name for a in hs.get_branches() if not a.name.startswith("_")]
res=[]
ntxtid = txtid.replace("KR", krx)
for branch in branches:
if re.match("^[A-Z-]+$", branch):
bt = "/doc/"
else:
bt = "/int/"
try:
os.makedirs(ntxtid+ bt + branch)
except:
pass
flist = [a.path for a in hs.get_contents("/", ref=branch)]
print (branch, len(flist))
pdic = {}
md = False
xi=[]
gjd = {}
for path in flist:
if path.startswith(txtid):
r=requests.get(f"https://raw.githubusercontent.com/{user}/{txtid}/{branch}/{path}")
if r.status_code == 200:
cont=r.content.decode(r.encoding)
if "<md:" in cont:
md = True
lines=cont.split("\n")
if bt == "/int/":
lx = parse_text_to_p(lines, gjd, md)
else:
lx = parse_text(lines, gjd, md)
save_text_part(lx, txtid, branch, path)
pdic[path] = lx
#print(path, pdic[path])
else:
return "No valid content found."
date=datetime.datetime.now()
today=f"{date:%Y-%m-%d}"
sd=""
save_gjd (ntxtid, branch, gjd, "entity")
save_gjd (ntxtid, branch, gjd, "g")
for f in pdic.keys():
fn = f[:-4]
fn = fn.replace("KR", krx)
#b=pdic[f]
sd+=f"<xi:include href='{fn}.xml' xmlns:xi='http://www.w3.org/2001/XInclude'/>\n"
try:
lx=pdic[f]
except:
print (f, flist, len(pdic), r.status_code)
sys.exit()
fname = f"{ntxtid}{bt}{branch}/{ntxtid}.xml"
try:
cat = re.findall("K.[0-9][a-z]+", ntxtid)[0]
except:
print("ERROR", ntxtid)
sys.exit()
try:
lxdate=lx['DATE']
except:
lxdate=""
try:
lxtit=lx['TITLE']
except:
lxtit="no_title"
if bt == "/int/":
out=tei_template.format(sd="<text><body>\n%s</body></text>" % (sd), today=today, user=user, txtid=ntxtid, title=lxtit, date=lxdate, branch=branch, cat=cat)
else:
out=tei_template.format(sd="<sourceDoc>\n%s</sourceDoc>" % (sd), today=today, user=user, txtid=ntxtid, title=lxtit, date=lxdate, branch=branch, cat=cat)
of=open(fname, "w")
of.write(out)
of.close()
if __name__ == '__main__':
try:
txtid=sys.argv[1]
except:
print ("Textid should be given as argument.")
sys.exit()
ntxtid = txtid.replace("KR", krx)
try:
os.makedirs(ntxtid+"/aux/map")
os.makedirs(ntxtid+"/doc")
os.makedirs(ntxtid+"/int")
except:
pass
sys.stderr.write("Processing %s\n" % (ntxtid))
convert_text(txtid)
| [
"[email protected]"
] | |
600cbfe11fbd060baa5d89e3fd5b8ac39a2676fd | c8812fd40cd541a0a040dbdf56a307f9de69e971 | /3/3.py | 5266c7f0217412aafc28a32369353135fbc2f899 | [] | no_license | tomelliot/pythonchallenge | c0dac7c99382f41377d5aec529ce76e53c16cd91 | 2d322af70d97cfb2ec099dc4b329b429dbb2a60f | refs/heads/master | 2021-01-01T15:49:40.754310 | 2013-10-08T04:48:17 | 2013-10-08T04:48:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | import re
sitetext = open("site.txt").read()
#p = re.compile("[A-Z]{3}[a-z][A-Z]{3}")
p = re.compile("[^A-Z][A-Z]{3}[a-z][A-Z]{3}[^A-Z]")
#m = p.match("SOME TEXT")
#print m.groups()
#m = p.finditer("SOME TEXTOTHErTEXT mOREaTEXt")
m = p.finditer(sitetext)
l = list()
for each in m:
l.append(each.group())
for text in l:
print text[4:5]
| [
"[email protected]"
] | |
b47d614e7fea25510861d6152f5cf82933020ab0 | 76836c6b69217f111bd4d0a2f60e4c0f12315965 | /statsgaim/smspline_csaps.py | 10f2fda1cd0735ebeff1382345a217f98679c1a1 | [
"BSD-3-Clause"
] | permissive | pahal2007/GAIM | 4fcaf1aea5d60f8b0b69a17ed9160fbe3792bc2a | 320184ff3e0ddd9bc031dfddfd3d30c342421d8f | refs/heads/main | 2023-01-24T02:40:13.025112 | 2020-12-07T05:43:41 | 2020-12-07T05:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,023 | py | import numpy as np
from matplotlib import gridspec
from matplotlib import pyplot as plt
from abc import ABCMeta, abstractmethod
from sklearn.utils.extmath import softmax
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_X_y
from sklearn.linear_model import LinearRegression
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin
from csaps import CubicSmoothingSpline
__all__ = ["SMSplineRegressor"]
class BaseSMSpline(BaseEstimator, metaclass=ABCMeta):
@abstractmethod
def __init__(self, reg_gamma=0.95, xmin=-1, xmax=1):
self.reg_gamma = reg_gamma
self.xmin = xmin
self.xmax = xmax
def _estimate_density(self, x):
"""method to estimate the density of input data
Parameters
---------
x : array-like of shape (n_samples, n_features)
containing the input dataset
"""
self.density_, self.bins_ = np.histogram(x, bins=10, density=True)
def _validate_hyperparameters(self):
"""method to validate model parameters
"""
if (self.reg_gamma < 0) or (self.reg_gamma > 1):
raise ValueError("reg_gamma must be >= 0 and <1, got %s." % self.reg_gamma)
if self.xmin > self.xmax:
raise ValueError("xmin must be <= xmax, got %s and %s." % (self.xmin, self.xmax))
def diff(self, x, order=1):
"""method to calculate derivatives of the fitted spline to the input
Parameters
---------
x : array-like of shape (n_samples, 1)
containing the input dataset
order : int
order of derivative
"""
if 'csaps' in str(self.sm_.__class__):
derivative = self.sm_(x,nu=order)
else:
if order == 1:
scalars = self.sm_.coef_
else:
scalars = 0
derivative = np.ones(x.shape[0])*scalars
return derivative
def visualize(self):
"""draw the fitted shape function
"""
check_is_fitted(self, "sm_")
fig = plt.figure(figsize=(6, 4))
inner = gridspec.GridSpec(2, 1, hspace=0.1, height_ratios=[6, 1])
ax1_main = plt.Subplot(fig, inner[0])
xgrid = np.linspace(self.xmin, self.xmax, 100).reshape([-1, 1])
ygrid = self.decision_function(xgrid)
ax1_main.plot(xgrid, ygrid)
ax1_main.set_xticklabels([])
ax1_main.set_title("Shape Function", fontsize=12)
fig.add_subplot(ax1_main)
ax1_density = plt.Subplot(fig, inner[1])
xint = ((np.array(self.bins_[1:]) + np.array(self.bins_[:-1])) / 2).reshape([-1, 1]).reshape([-1])
ax1_density.bar(xint, self.density_, width=xint[1] - xint[0])
ax1_main.get_shared_x_axes().join(ax1_main, ax1_density)
ax1_density.set_yticklabels([])
ax1_density.autoscale()
fig.add_subplot(ax1_density)
plt.show()
def decision_function(self, x):
"""output f(x) for given samples
Parameters
---------
x : array-like of shape (n_samples, 1)
containing the input dataset
Returns
-------
np.array of shape (n_samples,)
containing f(x)
"""
check_is_fitted(self, "sm_")
x = x.copy()
x[x < self.xmin] = self.xmin
x[x > self.xmax] = self.xmax
try:
pred = self.sm_(x)
except:
pred = self.sm_.predict(x.reshape(-1,1))
return pred.flatten()
class SMSplineRegressor(BaseSMSpline, RegressorMixin):
"""Base class for Cubic Smoothing Spline regression.
Details:
1. This is an API wrapper for the Python package `csaps`.
2. To handle input data with less than 4 unique samples, we replace smoothing spline by glm.
3. During prediction, the data which is outside of the given `xmin` and `xmax` will be clipped to the boundary.
Parameters
----------
    reg_gamma : float, optional. default=0.1
the roughness penalty strength of the spline algorithm, range from 0 to 1
xmin : float, optional. default=-1
the min boundary of the input
xmax : float, optional. default=1
the max boundary of the input
"""
def __init__(self, reg_gamma=0.1, xmin=-1, xmax=1):
super(SMSplineRegressor, self).__init__(
reg_gamma=reg_gamma,
xmin=xmin,
xmax=xmax)
def _validate_input(self, x, y):
"""method to validate data
Parameters
---------
x : array-like of shape (n_samples, 1)
containing the input dataset
y : array-like of shape (n_samples,)
containing the output dataset
"""
x, y = check_X_y(x, y, accept_sparse=["csr", "csc", "coo"],
multi_output=True, y_numeric=True)
return x, y.ravel()
def get_loss(self, label, pred, sample_weight=None):
"""method to calculate the cross entropy loss
Parameters
---------
label : array-like of shape (n_samples,)
containing the input dataset
pred : array-like of shape (n_samples,)
containing the output dataset
sample_weight : array-like of shape (n_samples,), optional
containing sample weights
Returns
-------
float
the cross entropy loss
"""
loss = np.average((label - pred) ** 2, axis=0, weights=sample_weight)
return loss
def fit(self, x, y, sample_weight=None):
"""fit the smoothing spline
Parameters
---------
x : array-like of shape (n_samples, n_features)
containing the input dataset
y : array-like of shape (n_samples,)
containing target values
sample_weight : array-like of shape (n_samples,), optional
containing sample weights
Returns
-------
object
self : Estimator instance.
"""
self._validate_hyperparameters()
x, y = self._validate_input(x, y)
self._estimate_density(x)
unique_num = len(np.unique(x.round(decimals=6)))
if unique_num >= 4:
x_uni, idx_uni = np.unique(x,return_index=True)
y_uni = y[idx_uni]
x_uni_ord = np.sort(x_uni)
y_uni_ord = y_uni[np.argsort(x_uni)]
n_samples = x_uni.shape[0]
if sample_weight is None:
sample_weight = np.ones(n_samples)
else:
sample_weight = sample_weight[idx_uni][np.argsort(x_uni)]
sample_weight = np.round(sample_weight / np.sum(sample_weight) * n_samples, 4)
self.sm_ = CubicSmoothingSpline(xdata=x_uni_ord,
ydata=y_uni_ord,
weights=sample_weight,
smooth=self.reg_gamma)
else:
n_samples = x.shape[0]
if sample_weight is None:
sample_weight = np.ones(n_samples)
else:
sample_weight = np.round(sample_weight / np.sum(sample_weight) * n_samples, 4)
self.sm_ = LinearRegression()
self.sm_.fit(X=x,y=y,sample_weight=sample_weight)
return self
def predict(self, x):
"""output f(x) for given samples
Parameters
---------
x : array-like of shape (n_samples, 1)
containing the input dataset
Returns
-------
np.array of shape (n_samples,)
containing f(x)
"""
pred = self.decision_function(x)
return pred | [
"[email protected]"
] | |
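A minimal usage sketch for the `SMSplineRegressor` class in the `smspline_csaps.py` row above; the data is synthetic, the import path is assumed from the repository layout, and the call pattern only uses the `fit`, `predict` and `get_loss` methods defined there.

import numpy as np
from statsgaim.smspline_csaps import SMSplineRegressor  # import path assumed from the repo layout

rng = np.random.RandomState(0)
x_demo = rng.uniform(-1, 1, size=(200, 1))
y_demo = np.sin(np.pi * x_demo).ravel() + rng.normal(scale=0.1, size=200)

# Fit a cubic smoothing spline on the toy 1-D data and report its training error.
spline = SMSplineRegressor(reg_gamma=0.9, xmin=-1, xmax=1)
spline.fit(x_demo, y_demo)
print("training MSE:", spline.get_loss(y_demo, spline.predict(x_demo)))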
325e05fa01cc833e8682b656048510c1e61bb314 | 12bdeeb22318aa74763b7252c818f690225686ca | /SocialMedia/posts/urls.py | d1327fbf8623e4c5cdfcc687664ecc67d9d603b2 | [] | no_license | khal33d-hub/SocialMedia-webapp | a2e9f2cdc827db348b768f05571a2d1af2a27c31 | 2c2fff68000d54f158ebe076498830495367305d | refs/heads/master | 2023-04-03T21:53:47.671238 | 2021-04-11T12:20:31 | 2021-04-11T12:20:31 | 289,774,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from django.urls import path
from . import views
app_name='posts'
urlpatterns = [
path('', views.PostList.as_view(), name="all"),
path("new/", views.CreatePost.as_view(), name="create"),
path("by/<username>/",views.UserPosts.as_view(),name="for_user"),
path("by/<username>/<int:pk>/",views.PostDetail.as_view(),name="single"),
path("delete/<int:pk>/",views.DeletePost.as_view(),name="delete"),
path('post/<int:pk>/comment/', views.add_comment_to_post, name='add_comment_to_post'),
path('comment/<int:pk>/remove/', views.comment_remove, name='comment_remove'),
]
| [
"[email protected]"
] | |
ac285c90cec5882e04d3fc62492be169bafee8b5 | 49f2de1ab03ec77cb003c77a06ce4fa26501e4b6 | /twitterclone/microblog/migrations/0003_auto_20151021_1944.py | ec15f828169978e4de39c8721778053b0db6e41a | [] | no_license | jesslarsen21/cs4990 | 873cbe7350948bf67d25e8de5aea8354c33031b3 | 3fa2458f41ce0b5c7fb77ca6f1bc45f0a5b971d2 | refs/heads/master | 2021-07-06T15:37:54.826472 | 2016-10-03T19:22:27 | 2016-10-03T19:22:27 | 41,459,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('microblog', '0002_auto_20151021_1934'),
]
operations = [
migrations.RemoveField(
model_name='tweet',
name='user',
),
migrations.AddField(
model_name='tweet',
name='Author',
field=models.ForeignKey(default=0, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
0d10bd7293f8aaadf494897c76d05cc4b005c177 | e3ac4abe1e8d7518e6edd98ac6b15a96536f6a75 | /DetectionImage.py | 1bebe0523b62f5273f99dcf27b98a3a8981985fd | [] | no_license | sirius-mhlee/object-detection-using-tensorflow-faster-rcnn | cfad23a4f4b7e06a4c707f6002109b7b5081d41b | 8193753a3c0ebb60e179c53aa7e3f66cb07a9c5e | refs/heads/master | 2020-04-14T23:59:08.568942 | 2019-01-30T12:57:39 | 2019-01-30T12:57:39 | 164,224,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,997 | py | import sys
import cv2
import os
import random as rand
import numpy as np
import tensorflow as tf
import Configuration as cfg
import AlexNetConv as anc
import RegionProposalNetwork as rpn
import DetectionNetwork as dn
import DataOperator as do
import BBoxOperator as bo
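# Expected command-line usage, inferred from the sys.argv indexing in main() below
# (the file names are only placeholders):
#   python DetectionImage.py <mean_file> <alexnet_conv_model> <rpn_model> \
#       <detection_model> <label_file> <input_image> <output_image>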
def generate_image(label_file_path, img, nms_detect_list):
label_file = open(label_file_path, 'r')
synset = [line.strip() for line in label_file.readlines()]
label_file.close()
random_color = lambda: (int(rand.random() * 255), int(rand.random() * 255), int(rand.random() * 255))
color = [random_color() for i in range(len(synset))]
save_img = img.copy()
height, width, channel = save_img.shape
for detect in nms_detect_list:
left = int(max(detect[2], 0))
top = int(max(detect[3], 0))
right = int(min(detect[4], width))
bottom = int(min(detect[5], height))
cv2.rectangle(save_img, (left, top), (right, bottom), color[detect[0]], 2)
text_size, baseline = cv2.getTextSize(' ' + synset[detect[0]] + ' ', cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
cv2.rectangle(save_img, (left, top - text_size[1] - (baseline * 2)), (left + text_size[0], top), color[detect[0]], -1)
cv2.putText(save_img, ' ' + synset[detect[0]] + ' ', (left, top - baseline), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
return save_img
def main():
with tf.Session() as sess:
image = tf.placeholder(tf.float32, [1, cfg.image_size_width, cfg.image_size_height, 3])
model = do.load_model(sys.argv[2])
mean = do.load_mean(sys.argv[1])
alexnetconv_model = anc.AlexNetConv(model, mean)
with tf.name_scope('alexnetconv_content'):
alexnetconv_model.build(image)
model = do.load_model(sys.argv[3])
rpn_model = rpn.RegionProposalNetwork(model, False)
with tf.name_scope('rpn_content'):
rpn_model.build(alexnetconv_model.pool5)
model = do.load_model(sys.argv[4])
detection_model = dn.DetectionNetwork(model, False)
with tf.name_scope('detection_content'):
detection_model.build(alexnetconv_model.pool5, rpn_model.rpn_cls_prob, rpn_model.rpn_bbox_pred)
sess.run(tf.global_variables_initializer())
img, expand_np_img, width, height = do.load_image(sys.argv[6])
region_scale_width = cfg.image_size_width / width
region_scale_height = cfg.image_size_height / height
feed_dict = {image:expand_np_img}
nms_region, region_prob, region_bbox = sess.run([detection_model.nms_region, detection_model.cls_prob, detection_model.bbox_pred], feed_dict=feed_dict)
region_bbox = bo.transform_bbox_detect(nms_region[:, 1:], region_bbox)
region_bbox = bo.clip_bbox(region_bbox)
detect_list = []
for i in range(0, cfg.object_class_num):
idx = np.where(region_prob[:, i] > cfg.detect_prob_thresh)[0]
if len(idx) > 0:
prob = region_prob[idx, i]
bbox = region_bbox[idx, i * 4:(i + 1) * 4]
region_list = np.hstack((bbox, prob[:, np.newaxis])).astype(np.float32, copy=False)
keep = bo.nms_bbox(region_list, cfg.detect_nms_thresh)
region_list = region_list[keep, :]
for detect in region_list:
x1 = detect[0] / region_scale_width
y1 = detect[1] / region_scale_height
x2 = detect[2] / region_scale_width
y2 = detect[3] / region_scale_height
if abs(x2 - x1 + 1) >= cfg.detect_size_thresh and abs(y2 - y1 + 1) >= cfg.detect_size_thresh:
detect_list.append((i, detect[4], x1, y1, x2, y2))
save_img = generate_image(sys.argv[5], img, detect_list)
cv2.imwrite(sys.argv[7], save_img)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
cafd8c7199ecbe2c8188fffb4db94467fc666216 | 7ba22c9826a1574777a08fb634ff15c56de6cb98 | /domain_adaptation/pixel_domain_adaptation/hparams.py | 924ce0d5878ec2b18e80ae7e47e3aad04b3bd21f | [] | no_license | dhanya1/full_cyclist | 02b85b8331f8ca9364169484ab97b32920cbbd14 | dd12c8d8a3deaaea15041e54f2e459a5041f11c2 | refs/heads/master | 2022-10-17T13:36:51.886476 | 2018-07-30T15:46:02 | 2018-07-30T15:46:02 | 142,896,293 | 0 | 1 | null | 2022-10-05T10:11:01 | 2018-07-30T15:46:15 | Python | UTF-8 | Python | false | false | 8,236 | py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define model HParams."""
import tensorflow as tf
def create_hparams(hparam_string=None):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
# The name of the architecture to use.
arch='resnet',
lrelu_leakiness=0.2,
batch_norm_decay=0.9,
weight_decay=1e-5,
normal_init_std=0.02,
generator_kernel_size=3,
discriminator_kernel_size=3,
# Stop training after this many examples are processed
      # If none, train indefinitely
num_training_examples=0,
# Apply data augmentation to datasets
# Applies only in training job
augment_source_images=False,
augment_target_images=False,
# Discriminator
# Number of filters in first layer of discriminator
num_discriminator_filters=64,
discriminator_conv_block_size=1, # How many convs to have at each size
discriminator_filter_factor=2.0, # Multiply # filters by this each layer
# Add gaussian noise with this stddev to every hidden layer of D
discriminator_noise_stddev=0.2, # lmetz: Start seeing results at >= 0.1
# If true, add this gaussian noise to input images to D as well
discriminator_image_noise=False,
discriminator_first_stride=1, # Stride in first conv of discriminator
discriminator_do_pooling=False, # If true, replace stride 2 with avg pool
discriminator_dropout_keep_prob=0.9, # keep probability for dropout
# DCGAN Generator
# Number of filters in generator decoder last layer (repeatedly halved
# from 1st layer)
num_decoder_filters=64,
# Number of filters in generator encoder 1st layer (repeatedly doubled
# after 1st layer)
num_encoder_filters=64,
# This is the shape to which the noise vector is projected (if we're
# transferring from noise).
# Write this way instead of [4, 4, 64] for hparam search flexibility
projection_shape_size=4,
projection_shape_channels=64,
# Indicates the method by which we enlarge the spatial representation
# of an image. Possible values include:
# - resize_conv: Performs a nearest neighbor resize followed by a conv.
# - conv2d_transpose: Performs a conv2d_transpose.
upsample_method='resize_conv',
# Visualization
summary_steps=500, # Output image summary every N steps
###################################
# Task Classifier Hyperparameters #
###################################
# Which task-specific prediction tower to use. Possible choices are:
# none: No task tower.
# doubling_pose_estimator: classifier + quaternion regressor.
# [conv + pool]* + FC
# Classifiers used in DSN paper:
# gtsrb: Classifier used for GTSRB
# svhn: Classifier used for SVHN
# mnist: Classifier used for MNIST
# pose_mini: Classifier + regressor used for pose_mini
task_tower='doubling_pose_estimator',
weight_decay_task_classifier=1e-5,
source_task_loss_weight=1.0,
transferred_task_loss_weight=1.0,
# Number of private layers in doubling_pose_estimator task tower
num_private_layers=2,
# The weight for the log quaternion loss we use for source and transferred
# samples of the cropped_linemod dataset.
# In the DSN work, 1/8 of the classifier weight worked well for our log
# quaternion loss
source_pose_weight=0.125 * 2.0,
transferred_pose_weight=0.125 * 1.0,
# If set to True, the style transfer network also attempts to change its
# weights to maximize the performance of the task tower. If set to False,
# then the style transfer network only attempts to change its weights to
# make the transferred images more likely according to the domain
# classifier.
task_tower_in_g_step=True,
task_loss_in_g_weight=1.0, # Weight of task loss in G
#########################################
# 'simple` generator arch model hparams #
#########################################
simple_num_conv_layers=1,
simple_conv_filters=8,
#########################
# Resnet Hyperparameters#
#########################
resnet_blocks=6, # Number of resnet blocks
resnet_filters=64, # Number of filters per conv in resnet blocks
# If true, add original input back to result of convolutions inside the
# resnet arch. If false, it turns into a simple stack of conv/relu/BN
# layers.
resnet_residuals=True,
#######################################
# The residual / interpretable model. #
#######################################
res_int_blocks=2, # The number of residual blocks.
res_int_convs=2, # The number of conv calls inside each block.
res_int_filters=64, # The number of filters used by each convolution.
####################
# Latent variables #
####################
# if true, then generate random noise and project to input for generator
noise_channel=True,
# The number of dimensions in the input noise vector.
noise_dims=10,
# If true, then one hot encode source image class and project as an
# additional channel for the input to generator. This gives the generator
# access to the class, which may help generation performance.
condition_on_source_class=False,
########################
# Loss Hyperparameters #
########################
domain_loss_weight=1.0,
style_transfer_loss_weight=1.0,
########################################################################
# Encourages the transferred images to be similar to the source images #
# using a configurable metric. #
########################################################################
# The weight of the loss function encouraging the source and transferred
# images to be similar. If set to 0, then the loss function is not used.
transferred_similarity_loss_weight=0.0,
# The type of loss used to encourage transferred and source image
# similarity. Valid values include:
# mpse: Mean Pairwise Squared Error
# mse: Mean Squared Error
# hinged_mse: Computes the mean squared error using squared differences
# greater than hparams.transferred_similarity_max_diff
# hinged_mae: Computes the mean absolute error using absolute
# differences greater than hparams.transferred_similarity_max_diff.
transferred_similarity_loss='mpse',
# The maximum allowable difference between the source and target images.
# This value is used, in effect, to produce a hinge loss. Note that the
# range of values should be between 0 and 1.
transferred_similarity_max_diff=0.4,
################################
# Optimization Hyperparameters #
################################
learning_rate=0.001,
batch_size=32,
lr_decay_steps=20000,
lr_decay_rate=0.95,
      # Recommendation from the DCGAN paper:
adam_beta1=0.5,
clip_gradient_norm=5.0,
# The number of times we run the discriminator train_op in a row.
discriminator_steps=1,
# The number of times we run the generator train_op in a row.
generator_steps=1)
if hparam_string:
tf.logging.info('Parsing command line hparams: %s', hparam_string)
hparams.parse(hparam_string)
tf.logging.info('Final parsed hparams: %s', hparams.values())
return hparams
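# Example (illustrative, not part of the original module): non-default values are
# passed as a comma-separated string of name=value pairs, e.g.
#   hparams = create_hparams('batch_size=64,learning_rate=0.0005')
#   print(hparams.batch_size, hparams.learning_rate)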
| [
"[email protected]"
] | |
d0e7985c21111234c18de6c012fedbe4ae9050a6 | ed96da6f20a166a3e98b3b9618d030271642572d | /lambda/qldb/export_transport_product.py | f9da8fd1057958c773489d820cc48a740818028c | [
"MIT"
] | permissive | UBC-CIC/VaccineDistribution | fb3c804c612dabde7553f564eaeee944c0cdfc6f | 3bd8d507eaabcf3a3213ec7c1d25127dd221c537 | refs/heads/master | 2023-05-05T04:25:19.615991 | 2021-06-02T16:53:00 | 2021-06-02T16:53:00 | 335,438,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,658 | py | from logging import basicConfig, getLogger, INFO
from connect_to_ledger import create_qldb_driver
from amazon.ion.simpleion import dumps, loads
logger = getLogger(__name__)
basicConfig(level=INFO)
import datetime
from constants import Constants
from register_person import get_index_number,get_scentityid_from_personid,get_scentity_contact,get_document_superadmin_approval_status
from insert_document import insert_documents
from sampledata.sample_data import convert_object_to_ion, get_value_from_documentid,document_exist,update_document
'''
suggestion - make another few functions for every company to delegate tasks to their employees using their person id
for example --> here a carrier company can use assigntrucker(person_id) to acknowledge pick up. Truck driver of that person_id
can use this pick_up_order. This function will have a check for trucker's person_id with original assigned person_id.
Right now scentityid will be checked.
We assume that any person of the carrier company can pickup the order
'''
def create_lorry_reciept(transaction_executor,carrier_id,truck_carrier_person_id,pick_up_location,delivery_location,consignee_id,consignee_name, is_Picked_Up):
lrno = get_index_number(transaction_executor,Constants.LORRY_RECEIPT_TABLE_NAME,Constants.LORRY_RECEIPT_INDEX_NAME)
lorry_reciept = {
"LorryRecieptNumber": lrno ,
"CarrierId":carrier_id,
"TruckerId":truck_carrier_person_id,
"ConsigneeId":consignee_id,
"ConsigneeName": consignee_name,
"PickUpLocation":pick_up_location,
"DeliveryLocation":delivery_location,
"PickUpTime": datetime.datetime.now().timestamp(),
"DeliveryTime": "",
"isPickedUp":is_Picked_Up,
"isDeliveryDone":False
}
lorry_reciept_id = insert_documents(transaction_executor, Constants.LORRY_RECEIPT_TABLE_NAME,convert_object_to_ion(lorry_reciept))
print("LR created: {}!".format(lorry_reciept_id))
return lorry_reciept_id
def create_airway_bill(transaction_executor,sender_id, reciever_id,container_id, air_carrier_id,export_airport_id,import_airport_id):
awbillno = get_index_number(transaction_executor,Constants.AIRWAY_BILL_TABLE_NAME,Constants.AIRWAY_BILL_INDEX_NAME)
export_airport_name = get_value_from_documentid(transaction_executor,Constants.SCENTITY_TABLE_NAME,export_airport_id,"ScEntityName")
import_airport_name = get_value_from_documentid(transaction_executor,Constants.SCENTITY_TABLE_NAME,import_airport_id,"ScEntityName")
airway_bill = {
"AirwayBillNumber" : awbillno,
"CarrierId":air_carrier_id,
"ContainerId":container_id,
"AirCarrierApproval":{
"isApproved":False,
"ApproverId":""
},
"RecieverApproval":{
"isApproved": False,
"ApproverId":""
},
"SenderScEntityId":sender_id,
"RecieverScEntityId":reciever_id,
"isDelivered":False,
"WarehouseId":"",
"ExportAirportId":export_airport_id,
"ExportAirportName":export_airport_name[0],
"ImportAirportId":import_airport_id,
"ImportAirportName":import_airport_name[0]
}
airway_bill_id = insert_documents(transaction_executor,Constants.AIRWAY_BILL_TABLE_NAME,convert_object_to_ion(airway_bill))
return airway_bill_id
def create_bill_of_lading(transaction_executor,sender_id, reciever_id,container_id, sea_carrier_id,export_port_id,import_port_id):
bolno = get_index_number(transaction_executor,Constants.BILL_OF_LADING_TABLE_NAME,Constants.BILL_OF_LADING_INDEX_NAME)
export_port_name = get_value_from_documentid(transaction_executor,Constants.SCENTITY_TABLE_NAME,export_port_id,"ScEntityName")
import_port_name = get_value_from_documentid(transaction_executor,Constants.SCENTITY_TABLE_NAME,import_port_id,"ScEntityName")
bill_of_lading = {
"BillOfLadingNumber" : bolno,
"CarrierId":sea_carrier_id,
"Container_id":container_id,
"SeaCarrierApproval":{
"isApproved":False,
"ApproverId":""
},
"RecieverApproval":{
"isApproved": False,
"ApproverId":""
},
"SenderScEntityId":sender_id,
"RecieverScEntityId":reciever_id, # If the same carrier is transporting the container then reciever Id will be carrier id
"isDelivered":False,
"WarehouseId":"",
"ExportPortId":export_port_id,
"ExportPortName":export_port_name[0],
"ImportPortId":import_port_id,
"ImportPortName":import_port_name[0]
}
bill_of_lading_id = insert_documents(transaction_executor,Constants.BILL_OF_LADING_TABLE_NAME,convert_object_to_ion(bill_of_lading))
return bill_of_lading_id
def update_document_in_container(transaction_executor,container_id,document_type,document_id):
statement = "FROM {} AS s by id WHERE id = '{}' INSERT INTO s.{} VALUE ?".format(Constants.CONTAINER_TABLE_NAME,container_id,document_type)
# print(statement)
cursor = transaction_executor.execute_statement(statement, document_id)
try:
next(cursor)
logger.info("Document inserted")
except StopIteration:
logger.info("Document cannot be inserted!")
# pick_up container requested by carrier ---> for every container that order entails
def pick_up_order(transaction_executor,pick_up_request_id,truck_carrier_person_id, freight_carrier_id, export_location_id,import_location_id):
if document_exist(transaction_executor,Constants.PICK_UP_REQUESTS_TABLE,pick_up_request_id):
update_document(transaction_executor,Constants.PICK_UP_REQUESTS_TABLE,"isAccepted",pick_up_request_id,True)
purchase_order_id = get_value_from_documentid(transaction_executor,Constants.PICK_UP_REQUESTS_TABLE, pick_up_request_id, "PurchaseOrderId")
purchase_order_id = purchase_order_id[0]
container_ids = get_value_from_documentid(transaction_executor,Constants.PURCHASE_ORDER_TABLE_NAME,purchase_order_id,"HighestPackagingLevelIds")
carrier_company_id = get_value_from_documentid(transaction_executor,Constants.CONTAINER_TABLE_NAME,container_ids[0][0],"CarrierCompanyId")
actual_sc_entity_id = get_scentityid_from_personid(transaction_executor,truck_carrier_person_id)
if carrier_company_id[0]== actual_sc_entity_id:
location_ids = [export_location_id,import_location_id]
scentity_type_code = list(map(lambda x: get_value_from_documentid(transaction_executor,Constants.SCENTITY_TABLE_NAME,x,"ScEntityTypeCode"),location_ids))
# print(scentity_type_code)
if ["3"] not in scentity_type_code[0] and ["4"] not in scentity_type_code[0] and scentity_type_code[0][0] != scentity_type_code[1][0]:
return_statement = "import and export locations can only be airports or sea ports"
return{
'statusCode': 400,
'body': return_statement}
else:
print("Authorized!")
if get_document_superadmin_approval_status(transaction_executor,Constants.SCENTITY_TABLE_NAME,freight_carrier_id):
product_id = get_value_from_documentid(transaction_executor,Constants.PURCHASE_ORDER_TABLE_NAME,purchase_order_id,"ProductId")
product_id = product_id[0]
manufacturer_id = get_value_from_documentid(transaction_executor,Constants.PRODUCT_TABLE_NAME,product_id,"ManufacturerId")
manufacturer_name = get_value_from_documentid(transaction_executor,Constants.SCENTITY_TABLE_NAME,manufacturer_id[0],"ScEntityName")
pick_up_location = get_scentity_contact(transaction_executor,manufacturer_id[0],"Address")
delivery_location = get_scentity_contact(transaction_executor,export_location_id,"Address")
logger.info("Pickup location is : {}".format(pick_up_location))
for container_id in container_ids[0]:
isPicked = get_value_from_documentid(transaction_executor,Constants.CONTAINER_TABLE_NAME,container_id,"isPicked")
iots = get_value_from_documentid(transaction_executor,Constants.CONTAINER_TABLE_NAME, container_id)
if len(iots[0]) == 0:
return_statement = "Assign IoTs first!"
return{
'statusCode': 400,
'body': return_statement}
else:
if isPicked[0] == 0:
lorry_reciept_id = create_lorry_reciept(transaction_executor,actual_sc_entity_id,truck_carrier_person_id,pick_up_location[0],delivery_location[0],manufacturer_id[0],manufacturer_name[0],True)
update_document_in_container(transaction_executor,container_id,"LorryRecieptIds",lorry_reciept_id[0])
export_location_type = get_value_from_documentid(transaction_executor,Constants.SCENTITY_TABLE_NAME,export_location_id,"ScEntityTypeCode")##
if export_location_type == ['3']:
airway_bill_id = create_airway_bill(transaction_executor,manufacturer_id[0],actual_sc_entity_id, container_id, freight_carrier_id,export_location_id,import_location_id)
update_document_in_container(transaction_executor,container_id,"AirwayBillIds",airway_bill_id[0])
lading_bill_type = "AirwayBill"
elif export_location_type == ['4']:
bill_of_lading_id = create_bill_of_lading(transaction_executor,manufacturer_id[0],actual_sc_entity_id, container_id,freight_carrier_id,export_location_id,import_location_id)
update_document_in_container(transaction_executor,container_id,"BillOfLadingIds",bill_of_lading_id[0])
lading_bill_type = "BillOfLading"
update_document(transaction_executor,Constants.CONTAINER_TABLE_NAME,"isPicked",container_id,True)
else:
return_statement = "Order Already Picked!"
return{
'statusCode': 400,
'body': return_statement}
message = "===================== O R D E R ====== P I C K E D ==================="
return{
'statusCode': 200,
                'body': {
                "LorryRecieptId" : lorry_reciept_id,
                "LadingBillType": lading_bill_type,
                "LadingBillId": airway_bill_id if lading_bill_type == "AirwayBill" else bill_of_lading_id,  # return whichever lading bill was created
"Message":message,
}}
else:
return_statement = "Carrier Company is not approved."
return{
'statusCode': 400,
'body': return_statement}
else:
return_statement = "Person not authorized for the pickup"
return{
'statusCode': 400,
'body': return_statement}
else:
return_statement = "Check Request Id"
return{
'statusCode': 400,
'body': return_statement}
# makes LR and give
# customs clearance (show CoO and PL) --> mark not cleared if IoT anomaly
# delivery is made... check it by sensor data as well
def pick_up_for_export(event):
try:
with create_qldb_driver() as driver:
pickuprequestid = event["PickUpRequestId"]
truckcarrierpersonid = event["PersonId"]
freightcarrierid = event["FreightCarrierId"] # id of the carrier that will take container to destination country --> can be same as carrier company on container or different
exportairportid = event["ExportAirportId"]
importairportid = event["ImportAirportId"]
return driver.execute_lambda(lambda executor: pick_up_order(executor, pickuprequestid,truckcarrierpersonid,freightcarrierid,exportairportid,importairportid))
except Exception:
return_statement = 'Error in Pick Up'
return{
'statusCode': 400,
'body': return_statement} | [
"[email protected]"
] | |
c3f160bc8cbd7f286f7e1f23c49c4353d3e23d8e | 83179c14ae81a2ed0733812195747340c9ef0555 | /Knapsack 1.py | 2aa91145dd47739001ac46a201868a4092bfa645 | [] | no_license | susami-jpg/atcoder_solved_probrem | 20f90ba2e3238c6857e370ed04a8407271ccc36f | 741a4acd79f637d6794c4dbcc2cad1c601b749fc | refs/heads/master | 2023-07-21T12:38:27.460309 | 2021-08-29T10:26:31 | 2021-08-29T10:26:31 | 375,561,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 00:06:57 2021
@author: kazuk
"""
n, w = map(int, input().split())
goods = []
for _ in range(n):
wi, vi = map(int, input().split())
goods.append((wi, vi))
dp = [[0] * (w + 1) for _ in range(n)]
wi, vi = goods[0]
for j in range(w + 1):
if j >= wi:
dp[0][j] = vi
for i in range(1, n):
wi, vi = goods[i]
for j in range(w + 1):
if wi > j:
dp[i][j] = dp[i - 1][j]
else:
dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - wi] + vi)
print(dp[-1][-1])
| [
"[email protected]"
] | |
5e849847f304d1016e03bc0a951697713ec9fdd0 | a8f3309a9025617a7d01d221cc9be7975c36eaa4 | /backend/manage.py | 57be28126ba9988b944cabeacc945a674a53025d | [] | no_license | crowdbotics-apps/snowy-wood-26833 | 7b4fa99b8b1a635016bd6441cd09aaf4f52e8cf1 | 40a2042d34ec52eca857a31317ba68c862c96019 | refs/heads/master | 2023-05-01T20:10:00.175134 | 2021-05-18T00:31:03 | 2021-05-18T00:31:03 | 368,356,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snowy_wood_26833.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a538b88f80e1ac1e15faee0b97525d7f40355451 | 0bc5cc0413b6591d651a5f0c401c27b5c7fea05c | /day6.py | d86b0089189e35be1cb4628eb82e42722c2686a4 | [] | no_license | MayankHirani/AdventOfCode2018 | 0b8ed15a0b8f0ffe907b20d012e95c23a327245f | e8143ee9d84b95e3652779301763bdec3dd98952 | refs/heads/master | 2020-09-24T06:13:30.380360 | 2019-12-04T04:37:54 | 2019-12-04T04:37:54 | 225,684,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | import string
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.nan)
with open('coordinates.txt') as f:
coordinates = [line.strip() for line in f.readlines()]
values = {}
# Letter creation
letters = []
for letter in string.ascii_uppercase:
letters.append(letter)
letters.append(letter * 2)
#
for index, coordinate in enumerate(coordinates):
x = coordinate.split(',')[0]
y = coordinate.split(',')[1].strip()
values[letters[index]] = ( int(x), int(y) )
grid = np.full((400, 400), ' ')
for value in values:
grid[values[value][0], values[value][1]] = value
# the grid holds letter labels; map them to integers so matplotlib can actually render it
numeric_grid = np.array([[letters.index(cell) + 1 if cell != ' ' else 0 for cell in row] for row in grid])
plt.imshow(numeric_grid)
plt.show()
| [
"[email protected]"
] | |
3d7b4350619788a1c0d6a5e6e273df671ddc643c | 0bb5a201d0194d62a71b4c7776aa9604a9fd1f7b | /filtreleme/thresholding2.py | b72076a722e71400b6a66c4fc016e4d1ebff8c06 | [] | no_license | bilge97/digital_image_processing | 144f1b03bc1a2c2cd4f50af6dd9548cf03ed1c97 | e29ebf3f0c55741cbc5b5d5ab4a901d594fa72c7 | refs/heads/master | 2021-05-17T15:49:54.910901 | 2020-04-04T18:57:11 | 2020-04-04T18:58:04 | 250,853,449 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('sayfa.JPG')
img_medianblur = cv2.medianBlur(img, 5)
ret, th1 = cv2.threshold(img , 30,255 , cv2.THRESH_BINARY) # pixels below the threshold become 0, above become 255
griton = cv2.cvtColor(img , cv2.COLOR_BGR2GRAY)
ret, thgray = cv2.threshold(griton , 50,255 , cv2.THRESH_BINARY) # pixels below the threshold become 0, above become 255
gaus = cv2.adaptiveThreshold(griton , 255 , cv2.ADAPTIVE_THRESH_GAUSSIAN_C , cv2.THRESH_BINARY , 115 , 1)
ret , otsu = cv2.threshold(griton , 12 , 255 , cv2.THRESH_BINARY+cv2.THRESH_OTSU)
titles = ['original', 'threshbinart', 'thgray', 'gaus', 'otsu', 'median blur']
images = [img, th1, thgray, gaus, otsu, img_medianblur]
for i in range(6):
plt.subplot(3, 3, i+1), plt.imshow(images[i], 'gray')
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
#cv2.imshow('original' , img)
#cv2.imshow('threshbinary' , th1)
#cv2.imshow('thgray' , thgray)
#cv2.imshow('gaus' , gaus)
#cv2.imshow('otsu' , otsu)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"[email protected]"
] | |
71e6f22f379a61247f6bb9a792d9ca6b67c9a157 | 96f26955cdc30088656e1c99465543285128a8dc | /sub/app/kettle.py | 74f03e85d502cdf88b5f37ab6c4b2378be1fbe06 | [] | no_license | gridsim/behavsim | 5668c25c6ae814647118a58db72293674b79e5d2 | e5c43f5e1507e42d44fa807b8c5faf1e583d7925 | refs/heads/master | 2021-01-18T14:40:55.297348 | 2015-07-17T10:50:02 | 2015-07-17T10:50:02 | 38,006,324 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,186 | py | import random as rd
import numpy as np
import sub.gui
import sub.rd_var
import time
# Defines the kettle function.
def kettle(param, res_dir, text_view):
# Defines the number of minutes in a day
mins = 1440
# Defines the name of the appliance.
name = "Kettle"
# Starts a timer.
start_global_time = time.time()
# Writes intro to terminal.
sub.gui.display(text_view, "New "+name+" Consumption Simulation!", "red")
# Repeats the following for each house.
for i in range(int(round(param["Number of Houses"][0]))):
# Starts a timer.
start_local_time = time.time()
# Initializes the active power vector.
active_power = np.zeros(mins*param["Number of Days"][0])
# Repeats the following for each day.
for j in range(int(round(param["Number of Days"][0]))):
# Continues only if the day is a week day.
if j % 7 != 5 and j % 7 != 6:
# Computes a breakfast use if prob is OK.
if 100*rd.random() <= param["Breakfast Prob [%]"][0]:
l1 = mins*j+sub.rd_var.norm(param["Breakfast Time [hour]"][0]*60,
param["Breakfast Time [hour]"][1]*60,
True)
l2 = l1+sub.rd_var.norm(param["Duration [min]"][0], param["Duration [min]"][1], True)
power = sub.rd_var.norm(param["Power [W]"][0], param["Power [W]"][1], False)
active_power[l1:l2] += power
# Computes a lunch use if prob is OK.
if 100*rd.random() <= param["Lunch Prob [%]"][0]:
l1 = mins*j+sub.rd_var.norm(param["Lunch Time [hour]"][0]*60,
param["Lunch Time [hour]"][1]*60,
True)
l2 = l1+sub.rd_var.norm(param["Duration [min]"][0], param["Duration [min]"][1], True)
power = sub.rd_var.norm(param["Power [W]"][0], param["Power [W]"][1], False)
active_power[l1:l2] += power
# Computes a dinner use if prob is OK.
if 100*rd.random() <= param["Dinner Prob [%]"][0]:
l1 = mins*j+sub.rd_var.norm(param["Dinner Time [hour]"][0]*60,
param["Dinner Time [hour]"][1]*60,
True)
l2 = l1+sub.rd_var.norm(param["Duration [min]"][0], param["Duration [min]"][1], True)
power = sub.rd_var.norm(param["Power [W]"][0], param["Power [W]"][1], False)
active_power[l1:l2] += power
# Continues only if the day is a weekend day.
if j % 7 == 5 or j % 7 == 6:
# Computes a breakfast use if prob is OK.
if 100*rd.random() <= param["Breakfast Prob (WE) [%]"][0]:
l1 = mins*j+sub.rd_var.norm(param["Breakfast Time (WE) [hour]"][0]*60,
param["Breakfast Time (WE) [hour]"][1]*60,
True)
l2 = l1+sub.rd_var.norm(param["Duration [min]"][0], param["Duration [min]"][1], True)
power = sub.rd_var.norm(param["Power [W]"][0], param["Power [W]"][1], False)
active_power[l1:l2] += power
# Computes a lunch use if prob is OK.
if 100*rd.random() <= param["Lunch Prob (WE) [%]"][0]:
l1 = mins*j+sub.rd_var.norm(param["Lunch Time (WE) [hour]"][0]*60,
param["Lunch Time (WE) [hour]"][1]*60,
True)
l2 = l1+sub.rd_var.norm(param["Duration [min]"][0], param["Duration [min]"][1], True)
power = sub.rd_var.norm(param["Power [W]"][0], param["Power [W]"][1], False)
active_power[l1:l2] += power
# Computes a dinner use if prob is OK.
if 100*rd.random() <= param["Dinner Prob (WE) [%]"][0]:
l1 = mins*j+sub.rd_var.norm(param["Dinner Time (WE) [hour]"][0]*60,
param["Dinner Time (WE) [hour]"][1]*60,
True)
l2 = l1+sub.rd_var.norm(param["Duration [min]"][0], param["Duration [min]"][1], True)
power = sub.rd_var.norm(param["Power [W]"][0], param["Power [W]"][1], False)
active_power[l1:l2] += power
        # Prepares the data to be saved in a .csv file.
csv_data = map(list, zip(*[active_power]))
# Defines and opens the .csv file.
csv_file = res_dir+"/"+"house_%000006d.csv" % (i+1)
fid_w = open(csv_file, "w")
# Saves the data to the .csv file.
np.savetxt(fid_w, csv_data, fmt="%.5e", delimiter=",")
        # Computes the daily average energy in kWh.
avg_power = sum(active_power)/param["Number of Days"][0]/60000
# Computes the local simulation time.
local_time = time.time()-start_local_time
# Writes the house status to the terminal.
text = "House %d:" % (i+1)
sub.gui.display(text_view, text, "blue")
text = "Daily average energy = %.3f kWh." % avg_power
sub.gui.display(text_view, text, "white")
text = "Duration = %.3f seconds." % local_time
sub.gui.display(text_view, text, "white")
# Computes the global simulation time.
gt = time.time()-start_global_time
h = int(gt//3600.0)
m = int((gt % 3600.0)//60.0)
s = (gt % 3600.0) % 60.0
# Writes the global simulation time to the terminal.
text = "The "+name+" Consumption Simulation took:"
sub.gui.display(text_view, text, "red")
text = "%d hours, %d minutes, and %.3f seconds." % (h, m, s)
sub.gui.display(text_view, text, "red")
| [
"[email protected]"
] | |
e48d8b786b9e71fcd1c3ff1f2d67b49f0d75cad0 | 97062249c6eb04069c6fb01e71d06bc334c828e1 | /apps/useradmin/src/useradmin/migrations/0008_convert_documents.py | 36f55715da32d406e6f06b49cf3b5cb04f2ce3df | [
"Apache-2.0"
] | permissive | Albertsss/hue | 1c8b31c64cc420a029f5b5b80712fb3d0c6cbd6e | 454d320dd09b6f7946f3cc05bc97c3e2ca6cd485 | refs/heads/master | 2021-07-08T17:21:13.237871 | 2018-05-30T06:03:21 | 2018-05-30T06:03:21 | 135,386,450 | 0 | 1 | Apache-2.0 | 2020-07-25T13:36:58 | 2018-05-30T04:06:18 | Python | UTF-8 | Python | false | false | 6,343 | py | # -*- coding: utf-8 -*-
from south.v2 import DataMigration
import desktop.management.commands.convert_documents
class Migration(DataMigration):
def forwards(self, orm):
# Earlier we did the document conversions from Doc1 to Doc2 upon loading
# the home page of a user. That approach had certain flaws like shared
# documents didn't show up until the owner logged in and opened his home
# page. Also, home page load time was affected when the conversions failed
# and loading the home page retried the conversions every single time.
# This migration handles the document conversion of all users at
# the same time preventing such flaws. This migration is being done at
# the useradmin level to avoid any dependency issues.
desktop.management.commands.convert_documents.Command().handle_noargs()
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'useradmin.grouppermission': {
'Meta': {'object_name': 'GroupPermission'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'hue_permission': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['useradmin.HuePermission']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'useradmin.huepermission': {
'Meta': {'object_name': 'HuePermission'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'app': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'through': u"orm['useradmin.GroupPermission']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'useradmin.ldapgroup': {
'Meta': {'object_name': 'LdapGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group'", 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'useradmin.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'HUE'", 'max_length': '64'}),
'first_login': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'home_directory': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1969, 12, 31, 0, 0)', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['useradmin']
symmetrical = True
| [
"[email protected]"
] | |
314e08627b8fec686f4e1d3cc61211d1d1180274 | 57a8f3e4cf49f0329c4456c67cf5222d3a137ef7 | /bitcoin_tools/analysis/status/__init__.py | e4688f7d07c4f301928ad47953ff4b01e6c98858 | [
"BSD-3-Clause"
] | permissive | beiex/bitcoin_tools | 25d9030e5968466438737fc78cfee404f832541f | 624663f2a9619ecf53715e8bf39404b5c035a9f2 | refs/heads/master | 2020-09-24T03:06:47.727228 | 2019-03-18T12:07:02 | 2019-03-18T12:07:02 | 225,647,121 | 0 | 1 | BSD-3-Clause | 2019-12-03T15:00:30 | 2019-12-03T15:00:29 | null | UTF-8 | Python | false | false | 690 | py | # Fee per byte range
MIN_FEE_PER_BYTE = 0
MAX_FEE_PER_BYTE = 350
FEE_STEP = 1
NSPECIALSCRIPTS = 6
try:
import bitcoin_tools.conf as CFG
except ImportError:
raise Exception("You don't have a configuration file. Make a copy of sample_conf.py")
try:
entries = [CFG.chainstate_path, CFG.data_path, CFG.figs_path, CFG.default_coin]
# If any attribute is not set, raise exception.
if None in entries:
        raise Exception("Your configuration file lacks some required data. Check sample_conf.py")
# If any attribute is not found, also raise exception.
except AttributeError:
    raise Exception("Your configuration file lacks some required data. Check sample_conf.py")
| [
"[email protected]"
] | |
7998e66d3d6cac28284e2dbbd1ac75bda73c5800 | 438f356ee4438fdbd6135877077e72dca82a637b | /Step1-PythonBasic/Practices/yuxq/1-5/ex4.py | b45deea46bb7966392b4ccb5eebb68ab81efadc9 | [
"Apache-2.0"
] | permissive | Jumpers/MysoftAutoTest | f9dd69ffc0f5117b29d9242456f98cdfc7992db8 | 50efc385a96532fc0777061d6c5e7201a4991f04 | refs/heads/master | 2021-01-22T08:59:33.916688 | 2015-02-09T06:49:12 | 2015-02-09T06:49:12 | 23,385,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | cars=100
space_in_a_car=4.0
drivers=30
passengers=90
cars_not_driven=cars-drivers
cars_driven=drivers
carpool_capacity=cars_driven*space_in_a_car
average_passengers_per_car=passengers/cars_driven
print "There are",cars,"cars available."
print "There are only",drivers,"drivers available."
print "There will be",cars_not_driven,"empty cars today."
print "We can transport",carpool_capacity,"people today."
print "We have",passengers,"to carpool today."
print "We need to put about",average_passengers_per_car,"in each car." | [
"[email protected]"
] | |
a123175849f6eb1ad28fc5557333dcee72f6926a | dbe0511ef1426207c2c0e867d9ddae1030f06d85 | /Plant_Disease_Detection_Benchmark_models/Inception_V3/baseline_scratch.py | 2a5a550643378418dd6793cdd104ce2752dc2fd2 | [
"MIT"
] | permissive | singnet/plant-disease-experiments | 91bc8fdec9b121fa8a3008b6cc062224dbcf9d06 | c7888fc1f7f94ba4263ee0c2b6fdb9628ee08ad6 | refs/heads/master | 2020-03-13T06:39:50.982041 | 2019-12-28T14:16:21 | 2019-12-28T14:16:21 | 131,009,311 | 27 | 24 | MIT | 2019-12-08T02:03:16 | 2018-04-25T13:18:52 | Python | UTF-8 | Python | false | false | 771 | py | from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.layers import Input
def build_baseline_model(args, input_shape):
"""
Builds a baseline InceptionV3 model from tensorflow implementation
with no trained weights loaded and including top layers for prediction
Args:
args: necessary args needed for training like train_data_dir, batch_size etc...
input_shape: shape of input tensor
Returns:
baseline inceptionV3 model
"""
iv3 = InceptionV3(input_tensor=Input(shape=input_shape), weights=None,
include_top=True, classes=args.nb_classes)
iv3.compile(optimizer='RMSprop', loss='categorical_crossentropy', metrics=['accuracy'])
return iv3
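# A usage sketch (illustration only; the `args` object and any attributes beyond the
# nb_classes attribute used above are assumptions):
#
#   model = build_baseline_model(args, input_shape=(299, 299, 3))
#   model.summary()
#
# builds and compiles the from-scratch InceptionV3 with args.nb_classes output classes.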
| [
""
] | |
7b8e4fc51d0c8a51e02356cc89ff20b4ac60ae8e | a32d5c62b8e50a6c774454629db8e21488d1cec8 | /antenna_pattern/combine_samples_and_angles.py | e59033a12297d7988ecac3ce5538d80833ba0ef7 | [
"MIT"
] | permissive | islamshohidul/power_estimation | c931a178d5f88c23152a802bf8a2d7d7fcf26166 | 677177a559e7aab34687746c292c9e5d52318b59 | refs/heads/master | 2022-04-02T08:07:49.745872 | 2020-02-25T20:32:12 | 2020-02-25T20:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,532 | py | """
Functions for loading angles produced using rotctld_angle_printer.py, and
reading GNU Radio samples produced using a file metasink with detached header,
and for interpolating angles based on the GNU Radio timestamps.
Usage example:
```
import combine_samples_and_angles as comb
angles = comb.load_angles('angle_file')
timestamps, samples = comb.load_gnuradio_samples('gnuradio_file')
from frequency_analysis import spectrum
timestamps, spectrum = spectrum(timestamps, samples)
power_spectrum = 10*np.log10(np.abs(spectrum)**2)
azimuth = comb.interpolate_angles(timestamps, angles, direction='azimuth')
#if frequency bin number 236 measures the frequency of the signal we want to estimate the power of:
plt.plot(azimuth, power_spectrum[:, 236])
```
"""
import pandas as pd
import numpy as np
import scipy.interpolate
import gnuradio.blocks as blocks
from gnuradio.blocks import parse_file_metadata
import gnuradio.gr as gr
def load_angles(angle_file):
"""
Read angles and timestamps from file
generated using rotctld_angle_printer.py.
Parameters
----------
angle_file :
File containing angles and corresponding timestamps
Returns
-------
angles : pandas.DataFrame
Dataframe containing angles and timestamps
"""
angles = pd.read_table(angle_file)
angles.timestamp = pd.to_datetime(angles.timestamp)
return angles
import pmt
def read_gnuradio_header_element(file_handle):
"""
Read a header element/header structure from the current position of
a GNU Radio header file. The header file contains multiple header
elements, one for each issued stream tag.
Parameters
----------
file_handle:
File handle for the header file, as obtained using open().
Returns
-------
info: dict
Header structure.
header_length: int
Length of the header element in bytes.
"""
header_str = file_handle.read(parse_file_metadata.HEADER_LENGTH)
if len(header_str) == 0:
return None, 0
header = pmt.deserialize_str(header_str)
info = parse_file_metadata.parse_header(header, False)
#get extra information
if info["extra_len"] > 0:
extra_str = file_handle.read(info["extra_len"])
extra = pmt.deserialize_str(extra_str)
extra_info = parse_file_metadata.parse_extra_dict(extra, info, False)
return info, parse_file_metadata.HEADER_LENGTH + info["extra_len"]
def load_gnuradio_header(gnuradio_hdr_file):
"""
Load GNU Radio meta file header. Function load_gnuradio_samples()
    uses this function to read the file header before reading the file data;
it is not necessary to call it explicitly.
The header file will probably contain multiple header instances,
one for each issue of a new tag, but we read only the first.
Parameters
----------
gnuradio_hdr_file : str
GNU Radio header filename
Returns
-------
info : dict
Header info
"""
handle = open(gnuradio_hdr_file)
return read_gnuradio_header_element(handle)[0]
def load_gnuradio_samples(gnuradio_file, return_full_timestamps=False):
"""
Read gnuradio samples and corresponding timestamps from file.
File should be produced using a file meta sink, with detached_header=True.
If vectors are output, or the stream is decimated, make sure that
the relative rate change of the file meta sink is set to the correct
rate. This function will assume that the rx_rate (sample rate) specifies
the sample rate of each vector, corresponding to a row in the output
data matrix.
It is assumed that the source sends a correct rx_time-tag so that this
corresponds to a UNIX timestamp (USRP does this).
Parameters
----------
gnuradio_file :
Filename
return_full_timestamps : boolean, optional
Whether to construct and return full set of timestamps for each sample
(True), or just the timestamp for first and last sample (False).
Constructing timestamps for every sample can be memory-extensive.
Returns
-------
timestamps :
Timestamps for the samples. If return_full_timestamps is set to true,
the timestamps will be a vector of length samples x 1. Otherwise,
timestamps is of length 2 x 1 and will contain the first and last
timestamp.
data :
Samples, matrix of length samples x vec_length.
"""
#read file header
header = load_gnuradio_header(gnuradio_file + ".hdr")
if header['cplx']:
datatype = np.complex64
else:
datatype = np.float32
vec_length = header['size']/datatype(1).itemsize
#read in data
data = np.memmap(gnuradio_file, offset=0, dtype=datatype, mode='r')
if (vec_length > 1):
data = np.reshape(data, (-1, vec_length))
first_timestamp = np.datetime64(pd.to_datetime(header['rx_time'], unit='s'))
sample_period_ns = 1.0/header['rx_rate']*1.0e09
if return_full_timestamps:
#construct timestamps, assuming constant sample rate and no dropped samples
seconds_since_beginning = np.arange(0, np.shape(data)[0])*sample_period_ns * np.timedelta64(1, 'ns')
timestamp = first_timestamp + seconds_since_beginning
else:
timestamp = [first_timestamp, first_timestamp + np.shape(data)[0]*sample_period_ns*np.timedelta64(1, 'ns')]
return timestamp, data
def interpolate_angles(interpolation_timestamps, angles, direction='azimuth'):
"""
Interpolate azimuth/elevation angles with respect to timestamps. Find which azimuth/elevation
    angles the interpolation timestamps correspond to.
The first angle timestamp is counted as the "epoch", and the timestamps are
recalculated as the number of seconds from this. Samples outside the angle
timerange are counted to belong to the first and last azimuth angle.
Parameters
----------
interpolation_timestamps :
Timestamps on which to interpolate
angles : pandas.DataFrame
Angles dataframe as read using load_angles(), containing timestamps and
azimuth angles as columns
direction : str, optional
Whether to yield azimuth ('azimuth') or elevation ('elevation')
Returns
-------
angles :
Interpolated azimuth or elevation angles in array of length
corresponding to the input timestamps.
"""
first_timestamp = angles.timestamp.as_matrix()[0].copy()
last_timestamp = angles.timestamp.as_matrix()[-1].copy()
subset = (interpolation_timestamps >= first_timestamp) & (interpolation_timestamps <= last_timestamp)
subset_timestamps = interpolation_timestamps[subset].copy()
#set timestamps to be number of seconds since first timestamp
angles_timestamp = (angles.timestamp - first_timestamp).as_matrix()/np.timedelta64(1, 's')
subset_timestamps = (subset_timestamps - first_timestamp)/np.timedelta64(1, 's')
#interpolate recorded angles with respect to timestamps of subset samples
angle_interpolator = scipy.interpolate.interp1d(angles_timestamp, angles[direction].as_matrix())
interpolated_angle = angle_interpolator(subset_timestamps)
#add the samples that were taken outside of the timestamp ranges of the angle measurements
first_part = np.repeat(angles[direction].iloc[0], np.count_nonzero(interpolation_timestamps < first_timestamp))
last_part = np.repeat(angles[direction].iloc[-1], np.count_nonzero(interpolation_timestamps > last_timestamp))
return np.concatenate((first_part, interpolated_angle, last_part))
| [
"[email protected]"
] | |
1645dec31e1080f771e155b93326f4fafd184343 | de1ebef36bc2c913af1891538931977035b8f101 | /blog/migrations/0007_article_slug.py | 7bce80d85a500fab1f77fe78f4723dd0562a840f | [] | no_license | shyamTGR/FHC-BackEnd-Django-AWS | 3b423a9a9f3109c2fbfaaa73bc442be2b5174a84 | 8fc7603131999ca2930d4c11ad268384945a7ee0 | refs/heads/master | 2022-11-17T02:46:14.792447 | 2020-07-13T15:03:46 | 2020-07-13T15:03:46 | 279,322,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # Generated by Django 2.0 on 2020-04-17 08:19
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_remove_article_slug'),
]
operations = [
migrations.AddField(
model_name='article',
name='slug',
field=autoslug.fields.AutoSlugField(default='slug', editable=False, populate_from='title'),
),
]
| [
"[email protected]"
] | |
557aa36e52bc3830b706dfaaa989c019d1a328a5 | 111ed603331a0049dbff842fb74caba0702e7509 | /game.py | 4770ba4a9eab546f0a76e09e13ceed825f0e0df3 | [] | no_license | DK7823808138/Just-a-Hero-Villain-game | 6b2f08ec5b3ee551821bda48b4fd8bd67620ecda | 0ade9a1a64ff1616bd7f67aa869d71de966a16fa | refs/heads/master | 2022-02-27T20:53:59.724954 | 2019-09-10T18:35:37 | 2019-09-10T18:35:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,734 | py | ##Ameet Kumar Rana, Army Institute Of Technology, Pune
##handle : godfather_rana
import pygame
pygame.init()
win = pygame.display.set_mode((852,480))
pygame.display.set_caption("My game")
black = (0,0,0)
walkLeft=[pygame.image.load('assets/animation/hero/L1.png'),pygame.image.load('assets/animation/hero/L2.png'),pygame.image.load('assets/animation/hero/L3.png'),pygame.image.load('assets/animation/hero/L4.png'),pygame.image.load('assets/animation/hero/L5.png'),pygame.image.load('assets/animation/hero/L6.png'),pygame.image.load('assets/animation/hero/L7.png'),pygame.image.load('assets/animation/hero/L8.png'),pygame.image.load('assets/animation/hero/L9.png')]
walkRight=[pygame.image.load('assets/animation/hero/R1.png'),pygame.image.load('assets/animation/hero/R2.png'),pygame.image.load('assets/animation/hero/R3.png'),pygame.image.load('assets/animation/hero/R4.png'),pygame.image.load('assets/animation/hero/R5.png'),pygame.image.load('assets/animation/hero/R6.png'),pygame.image.load('assets/animation/hero/R7.png'),pygame.image.load('assets/animation/hero/R8.png'),pygame.image.load('assets/animation/hero/R9.png')]
walkLeftV=[pygame.image.load('assets/animation/villain/L1E.png'),pygame.image.load('assets/animation/villain/L2E.png'),pygame.image.load('assets/animation/villain/L3E.png'),pygame.image.load('assets/animation/villain/L4E.png'),pygame.image.load('assets/animation/villain/L5E.png'),pygame.image.load('assets/animation/villain/L6E.png'),pygame.image.load('assets/animation/villain/L7E.png'),pygame.image.load('assets/animation/villain/L8E.png'),pygame.image.load('assets/animation/villain/L9E.png'),pygame.image.load('assets/animation/villain/L10E.png'),pygame.image.load('assets/animation/villain/L11E.png')]
walkRightV=[pygame.image.load('assets/animation/villain/R1E.png'),pygame.image.load('assets/animation/villain/R2E.png'),pygame.image.load('assets/animation/villain/R3E.png'),pygame.image.load('assets/animation/villain/R4E.png'),pygame.image.load('assets/animation/villain/R5E.png'),pygame.image.load('assets/animation/villain/R6E.png'),pygame.image.load('assets/animation/villain/R7E.png'),pygame.image.load('assets/animation/villain/R8E.png'),pygame.image.load('assets/animation/villain/R9E.png'),pygame.image.load('assets/animation/villain/R10E.png'),pygame.image.load('assets/animation/villain/R11E.png')]
bg = pygame.image.load('assets/images/bg.jpg')
#char = pygame.image.load('standing.png')
clock = pygame.time.Clock()
bulletSound = pygame.mixer.Sound('assets/music/effects/bullet.wav')
hitSound = pygame.mixer.Sound('assets/music/effects/hit.wav')
pygame.mixer.music.load('assets/music/sound/music.mp3')
pygame.mixer.music.play(-1)
class player(object):
def __init__(self,x,y,width,height):
self.x = x
self.y = y
self.width = width
self.height = height
self.isJump = False
self.jumpCount = 10
self.vel = 5
self.left = False
self.right = False
self.walkCount = 0
self.standing = True
self.box = (self.x + 20,self.y + 10,25,55)
self.health = 10
self.visible = True
def draw(self):
if self.visible:
if self.walkCount + 1 >= 27:
self.walkCount = 0
if not(self.standing):
if self.left:
win.blit(walkLeft[self.walkCount//3],(self.x,self.y))
self.walkCount += 1
elif self.right:
win.blit(walkRight[self.walkCount//3],(self.x,self.y))
self.walkCount += 1
else:
if self.left:
win.blit(walkLeft[0],(self.x,self.y))
else:
win.blit(walkRight[0],(self.x,self.y))
#pygame.draw.rect(win, (255,0,0), (self.x+10,self.y,50,5)) #red bar
#pygame.draw.rect(win, (0,0,255), (self.x+10,self.y - 10,50 - score * 5,5)) #blue bar
#pygame.draw.rect(win, black, (self.x+10,self.y,50,5),1)
self.box = (self.x + 20,self.y + 10,25,55)
#pygame.draw.rect(win, black, self.box,1)
def hit(self):
self.x = 150
self.y = 200
self.isJump = False
self.jumpCount = 10
self.walkCount = 0
font1 = pygame.font.SysFont('Arian',60,True)
text = font1.render('-3',1,(200,100,25))
win.blit(text,( 250 - (text.get_width()/2),250))
pygame.display.update()
i=0
while i<100:
pygame.time.delay(10)
i += 1
class projectile(object):
def __init__(self,x,y,radius,color,facing):
self.x = x
self.y = y
self.radius = radius
self.color = color
self.facing = facing
self.vel = 8 * facing
def draw(self):
pygame.draw.circle(win, self.color , (self.x,self.y), self.radius)
class enemy(object):
def __init__(self,x,y,width,height,end):
self.x = x
self.y = y
self.width = width
self.height = height
self.end = end
self.path = [self.x, self.end]
self.walkCount = 0
self.vel = 40 # increasing value of this variable will increase the difficulty level
#self.health = 10
self.box = (self.x + 16,self.y + 8,33,50)
self.visible = True
def draw(self):
self.move()
if self.visible:
if self.walkCount + 1 >= 33:
self.walkCount = 0
if self.vel > 0:
win.blit(walkRightV[self.walkCount//3],(self.x,self.y))
self.walkCount += 1
else:
win.blit(walkLeftV[self.walkCount//3],(self.x,self.y))
self.walkCount += 1
pygame.draw.rect(win, (255,0,0), (self.x+10,self.y - 10,50,5)) #red bar
neg = score if score>0 else 0
pygame.draw.rect(win, (0,0,255), (self.x+10,self.y - 10,50 - neg * 5,5)) #blue bar
pygame.draw.rect(win, black, (self.x+10,self.y - 10,50,5),1) #bar boundary
self.box = (self.x + 16,self.y + 8,33,50)
#pygame.draw.rect(win, black, self.box, 1)
def move(self):
if self.vel > 0:
if self.x + self.vel < self.path[1]+35:
self.x += self.vel
else:
self.vel = self.vel * -1
self.walkCount = 0
else:
if self.x + self.vel > 0 - self.width/2:
self.x += self.vel
else:
self.vel = self.vel * -1
self.walkCount = 0
def hit(self):
if score == 11:
self.visible = False
print('hit')
def drawGameWindow(): #drawing the whole game window
#global walkCount
win.blit(bg, (0,0) )
text = font.render('Score:' + str(score),1, black)
if villain.visible == False:
font1 = pygame.font.SysFont('Arian',100,True,True)
test = font1.render("You Won!!!",1,(255,0,0))
win.blit(test,(250,350))
fontName = pygame.font.SysFont('Algerian',25,True)
t = fontName.render('Godfather_rana V/S Noob Players :)',1,(200,126,50))
win.blit(t,(10,10))
win.blit(text, (770,10))
hero.draw()
villain.draw()
for bullet in bullets:
bullet.draw()
pygame.display.update()
score = 0
hero = player(150,200,64,64)
villain = enemy(200,400,64,64,800)
bullets = []
run = True
shootLoop = 0
font = pygame.font.SysFont('Arian', 20, True, True)
while run: #main loop
clock.tick(27)
#pygame.time.delay(150)
if shootLoop > 0:
shootLoop += 1
if shootLoop > 4:
shootLoop = 0
if villain.visible == True:
if hero.box[1] + hero.box[3] > villain.box[1] and hero.box[1] < villain.box[1] + villain.box[3]:
if hero.box[0] + hero.box[2] > villain.box[0] and hero.box[0] < villain.box[0] + villain.box[2]:
score -= 3
hero.hit()
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
for bullet in bullets:
if villain.visible == True:
if bullet.y + bullet.radius > villain.box[1] and bullet.y - bullet.radius < villain.box[1] + villain.box[3]:
if bullet.x + bullet.radius > villain.box[0] and bullet.x - bullet.radius < villain.box[0] + villain.box[2]:
bullets.pop(bullets.index(bullet))
villain.hit()
hitSound.play()
score += 1
if bullet.x < 852 and bullet.x > 0:
bullet.x += bullet.vel
else:
bullets.pop(bullets.index(bullet))
keys = pygame.key.get_pressed()
if keys[pygame.K_x] and shootLoop == 0:
bulletSound.play()
if hero.left:
facing = -1
else:
facing = 1
if len(bullets) < 2: #for the number of bullets
bullets.append(projectile(round(hero.x + hero.width//2) , round(hero.y + hero.height//2) , 6, (40,0,0),facing))
shootLoop = 1
if keys[pygame.K_LEFT] and hero.x > hero.vel:
hero.x -= hero.vel
hero.left = True
hero.right = False
hero.standing = False
elif keys[pygame.K_RIGHT] and hero.x < 852 - hero.width - hero.vel:
hero.x += hero.vel
hero.right = True
hero.left = False
hero.standing = False
else:
#hero.right = False
#hero.left = False
hero.standing = True
hero.walkCount = 0
if not(hero.isJump):
if keys[pygame.K_UP] and hero.y > hero.vel:
hero.y -= hero.vel
if keys[pygame.K_DOWN] and hero.y < 480 - hero.height - hero.vel:
hero.y += hero.vel
if keys[pygame.K_SPACE]:
hero.isJump = True
hero.right = False
hero.left = False
hero.walkCount = 0
else:
if hero.jumpCount >= -10:
sign = 1
if hero.jumpCount < 0:
sign = -1
hero.y -= (hero.jumpCount ** 2) * 0.5 * sign
hero.jumpCount -= 1
else:
hero.jumpCount = 10
hero.isJump = False
drawGameWindow()
pygame.quit()
| [
"[email protected]"
] | |
0d6addfc897dc90923015e6a0ca9ba9784660471 | 243877a6d048e47640d610cda28cb70ad3cafb80 | /venv/lib/python3.6/site-packages/mercurial/debugcommands.py | cf493fae89fc9449eed5b8b796eb2d104724d9b8 | [] | no_license | CJX32/my_blog | 855ca961e4017dde605c108a6d2c84c57447af2c | c8556c0ed11b3a1c75d8a9e897db43bef94641e7 | refs/heads/master | 2020-12-23T15:22:35.755197 | 2020-01-31T12:53:05 | 2020-01-31T12:53:05 | 236,883,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138,264 | py | # debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import codecs
import collections
import difflib
import errno
import operator
import os
import random
import re
import socket
import ssl
import stat
import string
import subprocess
import sys
import time
from .i18n import _
from .node import (
bin,
hex,
nullhex,
nullid,
nullrev,
short,
)
from .pycompat import (
getattr,
open,
)
from . import (
bundle2,
changegroup,
cmdutil,
color,
context,
copies,
dagparser,
encoding,
error,
exchange,
extensions,
filemerge,
filesetlang,
formatter,
hg,
httppeer,
localrepo,
lock as lockmod,
logcmdutil,
merge as mergemod,
obsolete,
obsutil,
phases,
policy,
pvec,
pycompat,
registrar,
repair,
revlog,
revset,
revsetlang,
scmutil,
setdiscovery,
simplemerge,
sshpeer,
sslutil,
streamclone,
templater,
treediscovery,
upgrade,
url as urlmod,
util,
vfs as vfsmod,
wireprotoframing,
wireprotoserver,
wireprotov2peer,
)
from .utils import (
cborutil,
compression,
dateutil,
procutil,
stringutil,
)
from .revlogutils import deltas as deltautil
release = lockmod.release
command = registrar.command()
@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
"""find the ancestor revision of two revisions in a given index"""
if len(args) == 3:
index, rev1, rev2 = args
r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
lookup = r.lookup
elif len(args) == 2:
if not repo:
raise error.Abort(
_(b'there is no Mercurial repository here (.hg not found)')
)
rev1, rev2 = args
r = repo.changelog
lookup = repo.lookup
else:
raise error.Abort(_(b'either two or three arguments required'))
a = r.ancestor(lookup(rev1), lookup(rev2))
ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
"""apply a stream clone bundle file"""
f = hg.openpath(ui, fname)
gen = exchange.readbundle(ui, f, fname)
gen.apply(repo)
@command(
b'debugbuilddag',
[
(
b'm',
b'mergeable-file',
None,
_(b'add single file mergeable changes'),
),
(
b'o',
b'overwritten-file',
None,
_(b'add single file all revs overwrite'),
),
(b'n', b'new-file', None, _(b'add new file at each rev')),
],
_(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
ui,
repo,
text=None,
mergeable_file=False,
overwritten_file=False,
new_file=False,
):
"""builds a repo with a given DAG from scratch in the current empty repo
The description of the DAG is read from stdin if not given on the
command line.
Elements:
- "+n" is a linear run of n nodes based on the current default parent
- "." is a single node based on the current default parent
- "$" resets the default parent to null (implied at the start);
otherwise the default parent is always the last node created
- "<p" sets the default parent to the backref p
- "*p" is a fork at parent p, which is a backref
- "*p1/p2" is a merge of parents p1 and p2, which are backrefs
- "/p2" is a merge of the preceding node and p2
- ":tag" defines a local tag for the preceding node
- "@branch" sets the named branch for subsequent nodes
- "#...\\n" is a comment up to the end of the line
Whitespace between the above elements is ignored.
A backref is either
- a number n, which references the node curr-n, where curr is the current
node, or
- the name of a local tag you placed earlier using ":tag", or
- empty to denote the default parent.
All string valued-elements are either strictly alphanumeric, or must
be enclosed in double quotes ("..."), with "\\" as escape character.
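    For example (an illustrative reading of the grammar above, not an example taken
    from this file): the text "+3 :a +5 :b <a +4 /b" creates 3 nodes tagging the last
    as "a", 5 more tagging the last as "b", resets the default parent back to "a",
    grows 4 nodes on that fork, and finally merges the last of them with "b".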
"""
if text is None:
ui.status(_(b"reading DAG from stdin\n"))
text = ui.fin.read()
cl = repo.changelog
if len(cl) > 0:
raise error.Abort(_(b'repository is not empty'))
# determine number of revs in DAG
total = 0
for type, data in dagparser.parsedag(text):
if type == b'n':
total += 1
if mergeable_file:
linesperrev = 2
# make a file with k lines per rev
initialmergedlines = [
b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
]
initialmergedlines.append(b"")
tags = []
progress = ui.makeprogress(
_(b'building'), unit=_(b'revisions'), total=total
)
with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
at = -1
atbranch = b'default'
nodeids = []
id = 0
progress.update(id)
for type, data in dagparser.parsedag(text):
if type == b'n':
ui.note((b'node %s\n' % pycompat.bytestr(data)))
id, ps = data
files = []
filecontent = {}
p2 = None
if mergeable_file:
fn = b"mf"
p1 = repo[ps[0]]
if len(ps) > 1:
p2 = repo[ps[1]]
pa = p1.ancestor(p2)
base, local, other = [
x[fn].data() for x in (pa, p1, p2)
]
m3 = simplemerge.Merge3Text(base, local, other)
ml = [l.strip() for l in m3.merge_lines()]
ml.append(b"")
elif at > 0:
ml = p1[fn].data().split(b"\n")
else:
ml = initialmergedlines
ml[id * linesperrev] += b" r%i" % id
mergedtext = b"\n".join(ml)
files.append(fn)
filecontent[fn] = mergedtext
if overwritten_file:
fn = b"of"
files.append(fn)
filecontent[fn] = b"r%i\n" % id
if new_file:
fn = b"nf%i" % id
files.append(fn)
filecontent[fn] = b"r%i\n" % id
if len(ps) > 1:
if not p2:
p2 = repo[ps[1]]
for fn in p2:
if fn.startswith(b"nf"):
files.append(fn)
filecontent[fn] = p2[fn].data()
def fctxfn(repo, cx, path):
if path in filecontent:
return context.memfilectx(
repo, cx, path, filecontent[path]
)
return None
if len(ps) == 0 or ps[0] < 0:
pars = [None, None]
elif len(ps) == 1:
pars = [nodeids[ps[0]], None]
else:
pars = [nodeids[p] for p in ps]
cx = context.memctx(
repo,
pars,
b"r%i" % id,
files,
fctxfn,
date=(id, 0),
user=b"debugbuilddag",
extra={b'branch': atbranch},
)
nodeid = repo.commitctx(cx)
nodeids.append(nodeid)
at = id
elif type == b'l':
id, name = data
ui.note((b'tag %s\n' % name))
tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
elif type == b'a':
ui.note((b'branch %s\n' % data))
atbranch = data
progress.update(id)
if tags:
repo.vfs.write(b"localtags", b"".join(tags))
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
indent_string = b' ' * indent
if all:
ui.writenoi18n(
b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
% indent_string
)
def showchunks(named):
ui.write(b"\n%s%s\n" % (indent_string, named))
for deltadata in gen.deltaiter():
node, p1, p2, cs, deltabase, delta, flags = deltadata
ui.write(
b"%s%s %s %s %s %s %d\n"
% (
indent_string,
hex(node),
hex(p1),
hex(p2),
hex(cs),
hex(deltabase),
len(delta),
)
)
chunkdata = gen.changelogheader()
showchunks(b"changelog")
chunkdata = gen.manifestheader()
showchunks(b"manifest")
for chunkdata in iter(gen.filelogheader, {}):
fname = chunkdata[b'filename']
showchunks(fname)
else:
if isinstance(gen, bundle2.unbundle20):
raise error.Abort(_(b'use debugbundle2 for this file'))
chunkdata = gen.changelogheader()
for deltadata in gen.deltaiter():
node, p1, p2, cs, deltabase, delta, flags = deltadata
ui.write(b"%s%s\n" % (indent_string, hex(node)))
def _debugobsmarkers(ui, part, indent=0, **opts):
"""display version and markers contained in 'data'"""
opts = pycompat.byteskwargs(opts)
data = part.read()
indent_string = b' ' * indent
try:
version, markers = obsolete._readmarkers(data)
except error.UnknownVersion as exc:
msg = b"%sunsupported version: %s (%d bytes)\n"
msg %= indent_string, exc.version, len(data)
ui.write(msg)
else:
msg = b"%sversion: %d (%d bytes)\n"
msg %= indent_string, version, len(data)
ui.write(msg)
fm = ui.formatter(b'debugobsolete', opts)
for rawmarker in sorted(markers):
m = obsutil.marker(None, rawmarker)
fm.startitem()
fm.plain(indent_string)
cmdutil.showmarker(fm, m)
fm.end()
def _debugphaseheads(ui, data, indent=0):
"""display version and markers contained in 'data'"""
indent_string = b' ' * indent
headsbyphase = phases.binarydecode(data)
for phase in phases.allphases:
for head in headsbyphase[phase]:
ui.write(indent_string)
ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
def _quasirepr(thing):
if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
return b'{%s}' % (
b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
)
return pycompat.bytestr(repr(thing))
def _debugbundle2(ui, gen, all=None, **opts):
"""lists the contents of a bundle2"""
if not isinstance(gen, bundle2.unbundle20):
raise error.Abort(_(b'not a bundle2 file'))
ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
parttypes = opts.get(r'part_type', [])
for part in gen.iterparts():
if parttypes and part.type not in parttypes:
continue
msg = b'%s -- %s (mandatory: %r)\n'
ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
if part.type == b'changegroup':
version = part.params.get(b'version', b'01')
cg = changegroup.getunbundler(version, part, b'UN')
if not ui.quiet:
_debugchangegroup(ui, cg, all=all, indent=4, **opts)
if part.type == b'obsmarkers':
if not ui.quiet:
_debugobsmarkers(ui, part, indent=4, **opts)
if part.type == b'phase-heads':
if not ui.quiet:
_debugphaseheads(ui, part, indent=4)
@command(
b'debugbundle',
[
(b'a', b'all', None, _(b'show all details')),
(b'', b'part-type', [], _(b'show only the named part type')),
(b'', b'spec', None, _(b'print the bundlespec of the bundle')),
],
_(b'FILE'),
norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
"""lists the contents of a bundle"""
with hg.openpath(ui, bundlepath) as f:
if spec:
spec = exchange.getbundlespec(ui, f)
ui.write(b'%s\n' % spec)
return
gen = exchange.readbundle(ui, f, bundlepath)
if isinstance(gen, bundle2.unbundle20):
return _debugbundle2(ui, gen, all=all, **opts)
_debugchangegroup(ui, gen, all=all, **opts)
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
"""lists the capabilities of a remote peer"""
opts = pycompat.byteskwargs(opts)
peer = hg.peer(ui, opts, path)
caps = peer.capabilities()
ui.writenoi18n(b'Main capabilities:\n')
for c in sorted(caps):
ui.write(b' %s\n' % c)
b2caps = bundle2.bundle2caps(peer)
if b2caps:
ui.writenoi18n(b'Bundle2 capabilities:\n')
for key, values in sorted(pycompat.iteritems(b2caps)):
ui.write(b' %s\n' % key)
for v in values:
ui.write(b' %s\n' % v)
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
"""validate the correctness of the current dirstate"""
parent1, parent2 = repo.dirstate.parents()
m1 = repo[parent1].manifest()
m2 = repo[parent2].manifest()
errors = 0
for f in repo.dirstate:
state = repo.dirstate[f]
if state in b"nr" and f not in m1:
ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
errors += 1
if state in b"a" and f in m1:
ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
errors += 1
if state in b"m" and f not in m1 and f not in m2:
ui.warn(
_(b"%s in state %s, but not in either manifest\n") % (f, state)
)
errors += 1
for f in m1:
state = repo.dirstate[f]
if state not in b"nrm":
ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
errors += 1
if errors:
error = _(b".hg/dirstate inconsistent with current parent's manifest")
raise error.Abort(error)
@command(
b'debugcolor',
[(b'', b'style', None, _(b'show all configured styles'))],
b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
"""show available color, effects or style"""
ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
if opts.get(r'style'):
return _debugdisplaystyle(ui)
else:
return _debugdisplaycolor(ui)
def _debugdisplaycolor(ui):
ui = ui.copy()
ui._styles.clear()
for effect in color._activeeffects(ui).keys():
ui._styles[effect] = effect
if ui._terminfoparams:
for k, v in ui.configitems(b'color'):
if k.startswith(b'color.'):
ui._styles[k] = k[6:]
elif k.startswith(b'terminfo.'):
ui._styles[k] = k[9:]
ui.write(_(b'available colors:\n'))
# sort label with a '_' after the other to group '_background' entry.
items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
for colorname, label in items:
ui.write(b'%s\n' % colorname, label=label)
def _debugdisplaystyle(ui):
ui.write(_(b'available style:\n'))
if not ui._styles:
return
width = max(len(s) for s in ui._styles)
for label, effects in sorted(ui._styles.items()):
ui.write(b'%s' % label, label=label)
if effects:
# 50
ui.write(b': ')
ui.write(b' ' * (max(0, width - len(label))))
ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
ui.write(b'\n')
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
"""create a stream clone bundle file
Stream bundles are special bundles that are essentially archives of
revlog files. They are commonly used for cloning very quickly.
"""
# TODO we may want to turn this into an abort when this functionality
# is moved into `hg bundle`.
if phases.hassecret(repo):
ui.warn(
_(
b'(warning: stream clone bundle will contain secret '
b'revisions)\n'
)
)
requirements, gen = streamclone.generatebundlev1(repo)
changegroup.writechunks(ui, gen, fname)
ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
@command(
b'debugdag',
[
(b't', b'tags', None, _(b'use tags as labels')),
(b'b', b'branches', None, _(b'annotate with branch names')),
(b'', b'dots', None, _(b'use dots for runs')),
(b's', b'spaces', None, _(b'separate elements by spaces')),
],
_(b'[OPTION]... [FILE [REV]...]'),
optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
"""format the changelog or an index DAG as a concise textual description
If you pass a revlog index, the revlog's DAG is emitted. If you list
revision numbers, they get labeled in the output as rN.
Otherwise, the changelog DAG of the current repo is emitted.
"""
spaces = opts.get(r'spaces')
dots = opts.get(r'dots')
if file_:
rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
revs = set((int(r) for r in revs))
def events():
for r in rlog:
yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
if r in revs:
yield b'l', (r, b"r%i" % r)
elif repo:
cl = repo.changelog
tags = opts.get(r'tags')
branches = opts.get(r'branches')
if tags:
labels = {}
for l, n in repo.tags().items():
labels.setdefault(cl.rev(n), []).append(l)
def events():
b = b"default"
for r in cl:
if branches:
newb = cl.read(cl.node(r))[5][b'branch']
if newb != b:
yield b'a', newb
b = newb
yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
if tags:
ls = labels.get(r)
if ls:
for l in ls:
yield b'l', (r, l)
else:
raise error.Abort(_(b'need repo for changelog dag'))
for line in dagparser.dagtextlines(
events(),
addspaces=spaces,
wraplabels=True,
wrapannotations=True,
wrapnonlinear=dots,
usedots=dots,
maxlinewidth=70,
):
ui.write(line)
ui.write(b"\n")
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
"""dump the contents of a data file revision"""
opts = pycompat.byteskwargs(opts)
if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
if rev is not None:
raise error.CommandError(b'debugdata', _(b'invalid arguments'))
file_, rev = None, file_
elif rev is None:
raise error.CommandError(b'debugdata', _(b'invalid arguments'))
r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
try:
ui.write(r.rawdata(r.lookup(rev)))
except KeyError:
raise error.Abort(_(b'invalid revision identifier %s') % rev)
@command(
b'debugdate',
[(b'e', b'extended', None, _(b'try extended date formats'))],
_(b'[-e] DATE [RANGE]'),
norepo=True,
optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
"""parse and display a date"""
if opts[r"extended"]:
d = dateutil.parsedate(date, util.extendeddateformats)
else:
d = dateutil.parsedate(date)
ui.writenoi18n(b"internal: %d %d\n" % d)
ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
if range:
m = dateutil.matchdate(range)
ui.writenoi18n(b"match: %s\n" % m(d[0]))
@command(
b'debugdeltachain',
cmdutil.debugrevlogopts + cmdutil.formatteropts,
_(b'-c|-m|FILE'),
optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
"""dump information about delta chains in a revlog
Output can be templatized. Available template keywords are:
:``rev``: revision number
:``chainid``: delta chain identifier (numbered by unique base)
:``chainlen``: delta chain length to this revision
:``prevrev``: previous revision in delta chain
:``deltatype``: role of delta / how it was computed
:``compsize``: compressed size of revision
:``uncompsize``: uncompressed size of revision
:``chainsize``: total size of compressed revisions in chain
:``chainratio``: total chain size divided by uncompressed revision size
(new delta chains typically start at ratio 2.00)
:``lindist``: linear distance from base revision in delta chain to end
of this revision
:``extradist``: total size of revisions not part of this delta chain from
base of delta chain to end of this revision; a measurement
of how much extra data we need to read/seek across to read
the delta chain for this revision
:``extraratio``: extradist divided by chainsize; another representation of
how much unrelated data is needed to load this delta chain
If the repository is configured to use the sparse read, additional keywords
are available:
:``readsize``: total size of data read from the disk for a revision
(sum of the sizes of all the blocks)
:``largestblock``: size of the largest block of data read from the disk
:``readdensity``: density of useful bytes in the data read from the disk
:``srchunks``: in how many data hunks the whole revision would be read
The sparse read can be enabled with experimental.sparse-read = True
"""
opts = pycompat.byteskwargs(opts)
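    # Illustrative example (template keywords documented above):
    #   hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {chainratio}\n'
    # prints, for every manifest revision, its delta chain id, chain length
    # and chain size ratio.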
r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
index = r.index
start = r.start
length = r.length
generaldelta = r.version & revlog.FLAG_GENERALDELTA
withsparseread = getattr(r, '_withsparseread', False)
def revinfo(rev):
e = index[rev]
compsize = e[1]
uncompsize = e[2]
chainsize = 0
if generaldelta:
if e[3] == e[5]:
deltatype = b'p1'
elif e[3] == e[6]:
deltatype = b'p2'
elif e[3] == rev - 1:
deltatype = b'prev'
elif e[3] == rev:
deltatype = b'base'
else:
deltatype = b'other'
else:
if e[3] == rev:
deltatype = b'base'
else:
deltatype = b'prev'
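        # walk the whole delta chain and sum the compressed size of every
        # revision it is built from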
chain = r._deltachain(rev)[0]
for iterrev in chain:
e = index[iterrev]
chainsize += e[1]
return compsize, uncompsize, deltatype, chain, chainsize
fm = ui.formatter(b'debugdeltachain', opts)
fm.plain(
b' rev chain# chainlen prev delta '
b'size rawsize chainsize ratio lindist extradist '
b'extraratio'
)
if withsparseread:
fm.plain(b' readsize largestblk rddensity srchunks')
fm.plain(b'\n')
chainbases = {}
for rev in r:
comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
chainbase = chain[0]
chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
basestart = start(chainbase)
revstart = start(rev)
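        # lineardist spans from the chain base's start offset to the end of
        # this revision; extradist is the part of that span not occupied by
        # the chain itself (unrelated data a reader has to seek across)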
lineardist = revstart + comp - basestart
extradist = lineardist - chainsize
try:
prevrev = chain[-2]
except IndexError:
prevrev = -1
if uncomp != 0:
chainratio = float(chainsize) / float(uncomp)
else:
chainratio = chainsize
if chainsize != 0:
extraratio = float(extradist) / float(chainsize)
else:
extraratio = extradist
fm.startitem()
fm.write(
b'rev chainid chainlen prevrev deltatype compsize '
b'uncompsize chainsize chainratio lindist extradist '
b'extraratio',
b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
rev,
chainid,
len(chain),
prevrev,
deltatype,
comp,
uncomp,
chainsize,
chainratio,
lineardist,
extradist,
extraratio,
rev=rev,
chainid=chainid,
chainlen=len(chain),
prevrev=prevrev,
deltatype=deltatype,
compsize=comp,
uncompsize=uncomp,
chainsize=chainsize,
chainratio=chainratio,
lindist=lineardist,
extradist=extradist,
extraratio=extraratio,
)
if withsparseread:
readsize = 0
largestblock = 0
srchunks = 0
for revschunk in deltautil.slicechunk(r, chain):
srchunks += 1
blkend = start(revschunk[-1]) + length(revschunk[-1])
blksize = blkend - start(revschunk[0])
readsize += blksize
if largestblock < blksize:
largestblock = blksize
if readsize:
readdensity = float(chainsize) / float(readsize)
else:
readdensity = 1
fm.write(
b'readsize largestblock readdensity srchunks',
b' %10d %10d %9.5f %8d',
readsize,
largestblock,
readdensity,
srchunks,
readsize=readsize,
largestblock=largestblock,
readdensity=readdensity,
srchunks=srchunks,
)
fm.plain(b'\n')
fm.end()
@command(
b'debugdirstate|debugstate',
[
(
b'',
b'nodates',
None,
_(b'do not display the saved mtime (DEPRECATED)'),
),
(b'', b'dates', True, _(b'display the saved mtime')),
(b'', b'datesort', None, _(b'sort by saved mtime')),
],
_(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
"""show the contents of the current dirstate"""
nodates = not opts[r'dates']
if opts.get(r'nodates') is not None:
nodates = True
datesort = opts.get(r'datesort')
if datesort:
keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
else:
keyfunc = None # sort by filename
for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
if ent[3] == -1:
timestr = b'unset '
elif nodates:
timestr = b'set '
else:
timestr = time.strftime(
r"%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
)
timestr = encoding.strtolocal(timestr)
if ent[1] & 0o20000:
mode = b'lnk'
else:
mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
for f in repo.dirstate.copies():
ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
@command(
b'debugdiscovery',
[
(b'', b'old', None, _(b'use old-style discovery')),
(
b'',
b'nonheads',
None,
_(b'use old-style discovery with non-heads included'),
),
(b'', b'rev', [], b'restrict discovery to this set of revs'),
(b'', b'seed', b'12323', b'specify the random seed use for discovery'),
]
+ cmdutil.remoteopts,
_(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
"""runs the changeset discovery protocol in isolation"""
opts = pycompat.byteskwargs(opts)
remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
remote = hg.peer(repo, opts, remoteurl)
ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
# make sure tests are repeatable
random.seed(int(opts[b'seed']))
if opts.get(b'old'):
def doit(pushedrevs, remoteheads, remote=remote):
if not util.safehasattr(remote, b'branches'):
# enable in-client legacy support
remote = localrepo.locallegacypeer(remote.local())
common, _in, hds = treediscovery.findcommonincoming(
repo, remote, force=True
)
common = set(common)
if not opts.get(b'nonheads'):
ui.writenoi18n(
b"unpruned common: %s\n"
% b" ".join(sorted(short(n) for n in common))
)
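            # reduce the unpruned common set to its topological heads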
clnode = repo.changelog.node
common = repo.revs(b'heads(::%ln)', common)
common = {clnode(r) for r in common}
return common, hds
else:
def doit(pushedrevs, remoteheads, remote=remote):
nodes = None
if pushedrevs:
revs = scmutil.revrange(repo, pushedrevs)
nodes = [repo[r].node() for r in revs]
common, any, hds = setdiscovery.findcommonheads(
ui, repo, remote, ancestorsof=nodes
)
return common, hds
remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
localrevs = opts[b'rev']
with util.timedcm('debug-discovery') as t:
common, hds = doit(localrevs, remoterevs)
# compute all statistics
common = set(common)
rheads = set(hds)
lheads = set(repo.heads())
data = {}
data[b'elapsed'] = t.elapsed
data[b'nb-common'] = len(common)
data[b'nb-common-local'] = len(common & lheads)
data[b'nb-common-remote'] = len(common & rheads)
data[b'nb-common-both'] = len(common & rheads & lheads)
data[b'nb-local'] = len(lheads)
data[b'nb-local-missing'] = data[b'nb-local'] - data[b'nb-common-local']
data[b'nb-remote'] = len(rheads)
data[b'nb-remote-unknown'] = data[b'nb-remote'] - data[b'nb-common-remote']
data[b'nb-revs'] = len(repo.revs(b'all()'))
data[b'nb-revs-common'] = len(repo.revs(b'::%ln', common))
data[b'nb-revs-missing'] = data[b'nb-revs'] - data[b'nb-revs-common']
# display discovery summary
ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
ui.writenoi18n(b"heads summary:\n")
ui.writenoi18n(b" total common heads: %(nb-common)9d\n" % data)
ui.writenoi18n(b" also local heads: %(nb-common-local)9d\n" % data)
ui.writenoi18n(b" also remote heads: %(nb-common-remote)9d\n" % data)
ui.writenoi18n(b" both: %(nb-common-both)9d\n" % data)
ui.writenoi18n(b" local heads: %(nb-local)9d\n" % data)
ui.writenoi18n(b" common: %(nb-common-local)9d\n" % data)
ui.writenoi18n(b" missing: %(nb-local-missing)9d\n" % data)
ui.writenoi18n(b" remote heads: %(nb-remote)9d\n" % data)
ui.writenoi18n(b" common: %(nb-common-remote)9d\n" % data)
ui.writenoi18n(b" unknown: %(nb-remote-unknown)9d\n" % data)
ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
if ui.verbose:
ui.writenoi18n(
b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common))
)
_chunksize = 4 << 10
@command(
b'debugdownload', [(b'o', b'output', b'', _(b'path')),], optionalrepo=True
)
def debugdownload(ui, repo, url, output=None, **opts):
"""download a resource using Mercurial logic and config
"""
fh = urlmod.open(ui, url, output)
dest = ui
if output:
dest = open(output, b"wb", _chunksize)
try:
data = fh.read(_chunksize)
while data:
dest.write(data)
data = fh.read(_chunksize)
finally:
if output:
dest.close()
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
'''show information about active extensions'''
opts = pycompat.byteskwargs(opts)
exts = extensions.extensions(ui)
hgver = util.version()
fm = ui.formatter(b'debugextensions', opts)
for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
isinternal = extensions.ismoduleinternal(extmod)
extsource = pycompat.fsencode(extmod.__file__)
if isinternal:
exttestedwith = [] # never expose magic string to users
else:
exttestedwith = getattr(extmod, 'testedwith', b'').split()
extbuglink = getattr(extmod, 'buglink', None)
fm.startitem()
if ui.quiet or ui.verbose:
fm.write(b'name', b'%s\n', extname)
else:
fm.write(b'name', b'%s', extname)
if isinternal or hgver in exttestedwith:
fm.plain(b'\n')
elif not exttestedwith:
fm.plain(_(b' (untested!)\n'))
else:
lasttestedversion = exttestedwith[-1]
fm.plain(b' (%s!)\n' % lasttestedversion)
fm.condwrite(
ui.verbose and extsource,
b'source',
_(b' location: %s\n'),
extsource or b"",
)
if ui.verbose:
fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
fm.data(bundled=isinternal)
fm.condwrite(
ui.verbose and exttestedwith,
b'testedwith',
_(b' tested with: %s\n'),
fm.formatlist(exttestedwith, name=b'ver'),
)
fm.condwrite(
ui.verbose and extbuglink,
b'buglink',
_(b' bug reporting: %s\n'),
extbuglink or b"",
)
fm.end()
@command(
b'debugfileset',
[
(
b'r',
b'rev',
b'',
_(b'apply the filespec on this revision'),
_(b'REV'),
),
(
b'',
b'all-files',
False,
_(b'test files from all revisions and working directory'),
),
(
b's',
b'show-matcher',
None,
_(b'print internal representation of matcher'),
),
(
b'p',
b'show-stage',
[],
_(b'print parsed tree at the given stage'),
_(b'NAME'),
),
],
_(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
'''parse and apply a fileset specification'''
from . import fileset
fileset.symbols # force import of fileset so we have predicates to optimize
opts = pycompat.byteskwargs(opts)
ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
stages = [
(b'parsed', pycompat.identity),
(b'analyzed', filesetlang.analyze),
(b'optimized', filesetlang.optimize),
]
stagenames = set(n for n, f in stages)
showalways = set()
if ui.verbose and not opts[b'show_stage']:
# show parsed tree by --verbose (deprecated)
showalways.add(b'parsed')
if opts[b'show_stage'] == [b'all']:
showalways.update(stagenames)
else:
for n in opts[b'show_stage']:
if n not in stagenames:
raise error.Abort(_(b'invalid stage name: %s') % n)
showalways.update(opts[b'show_stage'])
tree = filesetlang.parse(expr)
for n, f in stages:
tree = f(tree)
if n in showalways:
if opts[b'show_stage'] or n != b'parsed':
ui.write(b"* %s:\n" % n)
ui.write(filesetlang.prettyformat(tree), b"\n")
files = set()
if opts[b'all_files']:
for r in repo:
c = repo[r]
files.update(c.files())
files.update(c.substate)
if opts[b'all_files'] or ctx.rev() is None:
wctx = repo[None]
files.update(
repo.dirstate.walk(
scmutil.matchall(repo),
subrepos=list(wctx.substate),
unknown=True,
ignored=True,
)
)
files.update(wctx.substate)
else:
files.update(ctx.files())
files.update(ctx.substate)
m = ctx.matchfileset(expr)
if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
for f in sorted(files):
if not m(f):
continue
ui.write(b"%s\n" % f)
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
"""display format information about the current repository
    Use --verbose to get extra information about the current config value
    and the Mercurial default."""
opts = pycompat.byteskwargs(opts)
maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
maxvariantlength = max(len(b'format-variant'), maxvariantlength)
def makeformatname(name):
return b'%s:' + (b' ' * (maxvariantlength - len(name)))
fm = ui.formatter(b'debugformat', opts)
if fm.isplain():
def formatvalue(value):
if util.safehasattr(value, b'startswith'):
return value
if value:
return b'yes'
else:
return b'no'
else:
formatvalue = pycompat.identity
fm.plain(b'format-variant')
fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
fm.plain(b' repo')
if ui.verbose:
fm.plain(b' config default')
fm.plain(b'\n')
for fv in upgrade.allformatvariant:
fm.startitem()
repovalue = fv.fromrepo(repo)
configvalue = fv.fromconfig(repo)
if repovalue != configvalue:
namelabel = b'formatvariant.name.mismatchconfig'
repolabel = b'formatvariant.repo.mismatchconfig'
elif repovalue != fv.default:
namelabel = b'formatvariant.name.mismatchdefault'
repolabel = b'formatvariant.repo.mismatchdefault'
else:
namelabel = b'formatvariant.name.uptodate'
repolabel = b'formatvariant.repo.uptodate'
fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
if fv.default != configvalue:
configlabel = b'formatvariant.config.special'
else:
configlabel = b'formatvariant.config.default'
fm.condwrite(
ui.verbose,
b'config',
b' %6s',
formatvalue(configvalue),
label=configlabel,
)
fm.condwrite(
ui.verbose,
b'default',
b' %7s',
formatvalue(fv.default),
label=b'formatvariant.default',
)
fm.plain(b'\n')
fm.end()
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
"""show information detected about current filesystem"""
ui.writenoi18n(b'path: %s\n' % path)
ui.writenoi18n(
b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
)
ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
ui.writenoi18n(
b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
)
ui.writenoi18n(
b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
)
casesensitive = b'(unknown)'
try:
with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
except OSError:
pass
ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
@command(
b'debuggetbundle',
[
(b'H', b'head', [], _(b'id of head node'), _(b'ID')),
(b'C', b'common', [], _(b'id of common node'), _(b'ID')),
(
b't',
b'type',
b'bzip2',
_(b'bundle compression type to use'),
_(b'TYPE'),
),
],
_(b'REPO FILE [-H|-C ID]...'),
norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
"""retrieves a bundle from a repo
Every ID must be a full-length hex node id string. Saves the bundle to the
given file.
"""
opts = pycompat.byteskwargs(opts)
repo = hg.peer(ui, opts, repopath)
if not repo.capable(b'getbundle'):
raise error.Abort(b"getbundle() not supported by target repository")
args = {}
if common:
args[r'common'] = [bin(s) for s in common]
if head:
args[r'heads'] = [bin(s) for s in head]
# TODO: get desired bundlecaps from command line.
args[r'bundlecaps'] = None
bundle = repo.getbundle(b'debug', **args)
bundletype = opts.get(b'type', b'bzip2').lower()
btypes = {
b'none': b'HG10UN',
b'bzip2': b'HG10BZ',
b'gzip': b'HG10GZ',
b'bundle2': b'HG20',
}
bundletype = btypes.get(bundletype)
if bundletype not in bundle2.bundletypes:
raise error.Abort(_(b'unknown bundle type specified with --type'))
bundle2.writebundle(ui, bundle, bundlepath, bundletype)
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
"""display the combined ignore pattern and information about ignored files
    With no argument, display the combined ignore pattern.
    Given space-separated file names, show whether each given file is ignored
    and, if so, show the ignore rule (file and line number) that matched it.
"""
ignore = repo.dirstate._ignore
if not files:
# Show all the patterns
ui.write(b"%s\n" % pycompat.byterepr(ignore))
else:
m = scmutil.match(repo[None], pats=files)
uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
for f in m.files():
nf = util.normpath(f)
ignored = None
ignoredata = None
if nf != b'.':
if ignore(nf):
ignored = nf
ignoredata = repo.dirstate._ignorefileandline(nf)
else:
for p in util.finddirs(nf):
if ignore(p):
ignored = p
ignoredata = repo.dirstate._ignorefileandline(p)
break
if ignored:
if ignored == nf:
ui.write(_(b"%s is ignored\n") % uipathfn(f))
else:
ui.write(
_(
b"%s is ignored because of "
b"containing directory %s\n"
)
% (uipathfn(f), ignored)
)
ignorefile, lineno, line = ignoredata
ui.write(
_(b"(ignore rule in %s, line %d: '%s')\n")
% (ignorefile, lineno, line)
)
else:
ui.write(_(b"%s is not ignored\n") % uipathfn(f))
@command(
b'debugindex',
cmdutil.debugrevlogopts + cmdutil.formatteropts,
_(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
"""dump index data for a storage primitive"""
opts = pycompat.byteskwargs(opts)
store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
if ui.debugflag:
shortfn = hex
else:
shortfn = short
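    # width of the node id column: measured from the first entry, with a
    # sane default of 12 when the store is empty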
idlen = 12
for i in store:
idlen = len(shortfn(store.node(i)))
break
fm = ui.formatter(b'debugindex', opts)
fm.plain(
b' rev linkrev %s %s p2\n'
% (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
)
for rev in store:
node = store.node(rev)
parents = store.parents(node)
fm.startitem()
fm.write(b'rev', b'%6d ', rev)
fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
fm.write(b'node', b'%s ', shortfn(node))
fm.write(b'p1', b'%s ', shortfn(parents[0]))
fm.write(b'p2', b'%s', shortfn(parents[1]))
fm.plain(b'\n')
fm.end()
@command(
b'debugindexdot',
cmdutil.debugrevlogopts,
_(b'-c|-m|FILE'),
optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
"""dump an index DAG as a graphviz dot file"""
opts = pycompat.byteskwargs(opts)
r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
ui.writenoi18n(b"digraph G {\n")
for i in r:
node = r.node(i)
pp = r.parents(node)
ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
if pp[1] != nullid:
ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
ui.write(b"}\n")
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
"""show stats related to the changelog index"""
repo.changelog.shortest(nullid, 1)
index = repo.changelog.index
if not util.safehasattr(index, b'stats'):
raise error.Abort(_(b'debugindexstats only works with native code'))
for k, v in sorted(index.stats().items()):
ui.write(b'%s: %d\n' % (k, v))
@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
'''test Mercurial installation
Returns 0 on success.
'''
opts = pycompat.byteskwargs(opts)
problems = 0
fm = ui.formatter(b'debuginstall', opts)
fm.startitem()
# encoding
fm.write(b'encoding', _(b"checking encoding (%s)...\n"), encoding.encoding)
err = None
try:
codecs.lookup(pycompat.sysstr(encoding.encoding))
except LookupError as inst:
err = stringutil.forcebytestr(inst)
problems += 1
fm.condwrite(
err,
b'encodingerror',
_(b" %s\n (check that your locale is properly set)\n"),
err,
)
# Python
fm.write(
b'pythonexe',
_(b"checking Python executable (%s)\n"),
pycompat.sysexecutable or _(b"unknown"),
)
fm.write(
b'pythonver',
_(b"checking Python version (%s)\n"),
(b"%d.%d.%d" % sys.version_info[:3]),
)
fm.write(
b'pythonlib',
_(b"checking Python lib (%s)...\n"),
os.path.dirname(pycompat.fsencode(os.__file__)),
)
security = set(sslutil.supportedprotocols)
if sslutil.hassni:
security.add(b'sni')
fm.write(
b'pythonsecurity',
_(b"checking Python security support (%s)\n"),
fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
)
# These are warnings, not errors. So don't increment problem count. This
# may change in the future.
if b'tls1.2' not in security:
fm.plain(
_(
b' TLS 1.2 not supported by Python install; '
b'network connections lack modern security\n'
)
)
if b'sni' not in security:
fm.plain(
_(
b' SNI not supported by Python install; may have '
b'connectivity issues with some servers\n'
)
)
# TODO print CA cert info
# hg version
hgver = util.version()
fm.write(
b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
)
fm.write(
b'hgverextra',
_(b"checking Mercurial custom build (%s)\n"),
b'+'.join(hgver.split(b'+')[1:]),
)
# compiled modules
fm.write(
b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
)
fm.write(
b'hgmodules',
_(b"checking installed modules (%s)...\n"),
os.path.dirname(pycompat.fsencode(__file__)),
)
rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
rustext = rustandc # for now, that's the only case
cext = policy.policy in (b'c', b'allow') or rustandc
nopure = cext or rustext
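    # try importing only the compiled modules the active module policy
    # requires; any failure here is reported as an installation problem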
if nopure:
err = None
try:
if cext:
from .cext import (
base85,
bdiff,
mpatch,
osutil,
)
# quiet pyflakes
dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
if rustext:
from .rustext import (
ancestor,
dirstate,
)
dir(ancestor), dir(dirstate) # quiet pyflakes
except Exception as inst:
err = stringutil.forcebytestr(inst)
problems += 1
fm.condwrite(err, b'extensionserror', b" %s\n", err)
compengines = util.compengines._engines.values()
fm.write(
b'compengines',
_(b'checking registered compression engines (%s)\n'),
fm.formatlist(
sorted(e.name() for e in compengines),
name=b'compengine',
fmt=b'%s',
sep=b', ',
),
)
fm.write(
b'compenginesavail',
_(b'checking available compression engines (%s)\n'),
fm.formatlist(
sorted(e.name() for e in compengines if e.available()),
name=b'compengine',
fmt=b'%s',
sep=b', ',
),
)
wirecompengines = compression.compengines.supportedwireengines(
compression.SERVERROLE
)
fm.write(
b'compenginesserver',
_(
b'checking available compression engines '
b'for wire protocol (%s)\n'
),
fm.formatlist(
[e.name() for e in wirecompengines if e.wireprotosupport()],
name=b'compengine',
fmt=b'%s',
sep=b', ',
),
)
re2 = b'missing'
if util._re2:
re2 = b'available'
fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
fm.data(re2=bool(util._re2))
# templates
p = templater.templatepaths()
fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
fm.condwrite(not p, b'', _(b" no template directories found\n"))
if p:
m = templater.templatepath(b"map-cmdline.default")
if m:
# template found, check if it is working
err = None
try:
templater.templater.frommapfile(m)
except Exception as inst:
err = stringutil.forcebytestr(inst)
p = None
fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
else:
p = None
fm.condwrite(
p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
)
fm.condwrite(
not m,
b'defaulttemplatenotfound',
_(b" template '%s' not found\n"),
b"default",
)
if not p:
problems += 1
fm.condwrite(
not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
)
# editor
editor = ui.geteditor()
editor = util.expandpath(editor)
editorbin = procutil.shellsplit(editor)[0]
fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
cmdpath = procutil.findexe(editorbin)
fm.condwrite(
not cmdpath and editor == b'vi',
b'vinotfound',
_(
b" No commit editor set and can't find %s in PATH\n"
b" (specify a commit editor in your configuration"
b" file)\n"
),
not cmdpath and editor == b'vi' and editorbin,
)
fm.condwrite(
not cmdpath and editor != b'vi',
b'editornotfound',
_(
b" Can't find editor '%s' in PATH\n"
b" (specify a commit editor in your configuration"
b" file)\n"
),
not cmdpath and editorbin,
)
if not cmdpath and editor != b'vi':
problems += 1
# check username
username = None
err = None
try:
username = ui.username()
except error.Abort as e:
err = stringutil.forcebytestr(e)
problems += 1
fm.condwrite(
username, b'username', _(b"checking username (%s)\n"), username
)
fm.condwrite(
err,
b'usernameerror',
_(
b"checking username...\n %s\n"
b" (specify a username in your configuration file)\n"
),
err,
)
for name, mod in extensions.extensions():
handler = getattr(mod, 'debuginstall', None)
if handler is not None:
problems += handler(ui, fm)
fm.condwrite(not problems, b'', _(b"no problems detected\n"))
if not problems:
fm.data(problems=problems)
fm.condwrite(
problems,
b'problems',
_(b"%d problems detected, please check your install!\n"),
problems,
)
fm.end()
return problems
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
"""test whether node ids are known to a repo
Every ID must be a full-length hex node id string. Returns a list of 0s
and 1s indicating unknown/known.
"""
opts = pycompat.byteskwargs(opts)
repo = hg.peer(ui, opts, repopath)
if not repo.capable(b'known'):
raise error.Abort(b"known() not supported by target repository")
flags = repo.known([bin(s) for s in ids])
ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
'''backwards compatibility with old bash completion scripts (DEPRECATED)'''
debugnamecomplete(ui, repo, *args)
@command(
b'debuglocks',
[
(b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
(
b'W',
b'force-wlock',
None,
_(b'free the working state lock (DANGEROUS)'),
),
(b's', b'set-lock', None, _(b'set the store lock until stopped')),
(
b'S',
b'set-wlock',
None,
_(b'set the working state lock until stopped'),
),
],
_(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
"""show or modify state of locks
By default, this command will show which locks are held. This
includes the user and process holding the lock, the amount of time
the lock has been held, and the machine name where the process is
running if it's not local.
Locks protect the integrity of Mercurial's data, so should be
treated with care. System crashes or other interruptions may cause
locks to not be properly released, though Mercurial will usually
detect and remove such stale locks automatically.
However, detecting stale locks may not always be possible (for
instance, on a shared filesystem). Removing locks may also be
blocked by filesystem permissions.
Setting a lock will prevent other commands from changing the data.
The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
The set locks are removed when the command exits.
Returns 0 if no locks are held.
"""
if opts.get(r'force_lock'):
repo.svfs.unlink(b'lock')
if opts.get(r'force_wlock'):
repo.vfs.unlink(b'wlock')
if opts.get(r'force_lock') or opts.get(r'force_wlock'):
return 0
locks = []
try:
if opts.get(r'set_wlock'):
try:
locks.append(repo.wlock(False))
except error.LockHeld:
raise error.Abort(_(b'wlock is already held'))
if opts.get(r'set_lock'):
try:
locks.append(repo.lock(False))
except error.LockHeld:
raise error.Abort(_(b'lock is already held'))
if len(locks):
ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
return 0
finally:
release(*locks)
now = time.time()
held = 0
def report(vfs, name, method):
# this causes stale locks to get reaped for more accurate reporting
try:
l = method(False)
except error.LockHeld:
l = None
if l:
l.release()
else:
try:
st = vfs.lstat(name)
age = now - st[stat.ST_MTIME]
user = util.username(st.st_uid)
locker = vfs.readlock(name)
if b":" in locker:
host, pid = locker.split(b':')
if host == socket.gethostname():
locker = b'user %s, process %s' % (user or b'None', pid)
else:
locker = b'user %s, process %s, host %s' % (
user or b'None',
pid,
host,
)
ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
return 1
except OSError as e:
if e.errno != errno.ENOENT:
raise
ui.writenoi18n(b"%-6s free\n" % (name + b":"))
return 0
held += report(repo.svfs, b"lock", repo.lock)
held += report(repo.vfs, b"wlock", repo.wlock)
return held
@command(
b'debugmanifestfulltextcache',
[
(b'', b'clear', False, _(b'clear the cache')),
(
b'a',
b'add',
[],
_(b'add the given manifest nodes to the cache'),
_(b'NODE'),
),
],
b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
"""show, clear or amend the contents of the manifest fulltext cache"""
def getcache():
r = repo.manifestlog.getstorage(b'')
try:
return r._fulltextcache
except AttributeError:
msg = _(
b"Current revlog implementation doesn't appear to have a "
b"manifest fulltext cache\n"
)
raise error.Abort(msg)
if opts.get(r'clear'):
with repo.wlock():
cache = getcache()
cache.clear(clear_persisted_data=True)
return
if add:
with repo.wlock():
m = repo.manifestlog
store = m.getstorage(b'')
for n in add:
try:
manifest = m[store.lookup(n)]
except error.LookupError as e:
raise error.Abort(e, hint=b"Check your manifest node id")
                manifest.read() # stores revision in cache too
return
cache = getcache()
if not len(cache):
ui.write(_(b'cache empty\n'))
else:
ui.write(
_(
b'cache contains %d manifest entries, in order of most to '
b'least recent:\n'
)
% (len(cache),)
)
totalsize = 0
for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
size = len(data)
totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
ui.write(
_(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
)
ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
ui.write(
_(b'total cache data size %s, on-disk %s\n')
% (util.bytecount(totalsize), util.bytecount(ondisk))
)
@command(b'debugmergestate', [], b'')
def debugmergestate(ui, repo, *args):
"""print merge state
Use --verbose to print out information about whether v1 or v2 merge state
was chosen."""
def _hashornull(h):
if h == nullhex:
return b'null'
else:
return h
def printrecords(version):
ui.writenoi18n(b'* version %d records\n' % version)
if version == 1:
records = v1records
else:
records = v2records
for rtype, record in records:
# pretty print some record types
if rtype == b'L':
ui.writenoi18n(b'local: %s\n' % record)
elif rtype == b'O':
ui.writenoi18n(b'other: %s\n' % record)
elif rtype == b'm':
driver, mdstate = record.split(b'\0', 1)
ui.writenoi18n(
b'merge driver: %s (state "%s")\n' % (driver, mdstate)
)
elif rtype in b'FDC':
r = record.split(b'\0')
f, state, hash, lfile, afile, anode, ofile = r[0:7]
if version == 1:
onode = b'not stored in v1 format'
flags = r[7]
else:
onode, flags = r[7:9]
ui.writenoi18n(
b'file: %s (record type "%s", state "%s", hash %s)\n'
% (f, rtype, state, _hashornull(hash))
)
ui.writenoi18n(
b' local path: %s (flags "%s")\n' % (lfile, flags)
)
ui.writenoi18n(
b' ancestor path: %s (node %s)\n'
% (afile, _hashornull(anode))
)
ui.writenoi18n(
b' other path: %s (node %s)\n'
% (ofile, _hashornull(onode))
)
elif rtype == b'f':
filename, rawextras = record.split(b'\0', 1)
extras = rawextras.split(b'\0')
i = 0
extrastrings = []
while i < len(extras):
extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1]))
i += 2
ui.writenoi18n(
b'file extras: %s (%s)\n'
% (filename, b', '.join(extrastrings))
)
elif rtype == b'l':
labels = record.split(b'\0', 2)
labels = [l for l in labels if len(l) > 0]
ui.writenoi18n(b'labels:\n')
ui.write((b' local: %s\n' % labels[0]))
ui.write((b' other: %s\n' % labels[1]))
if len(labels) > 2:
ui.write((b' base: %s\n' % labels[2]))
else:
ui.writenoi18n(
b'unrecognized entry: %s\t%s\n'
% (rtype, record.replace(b'\0', b'\t'))
)
# Avoid mergestate.read() since it may raise an exception for unsupported
# merge state records. We shouldn't be doing this, but this is OK since this
# command is pretty low-level.
ms = mergemod.mergestate(repo)
# sort so that reasonable information is on top
v1records = ms._readrecordsv1()
v2records = ms._readrecordsv2()
order = b'LOml'
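    # local ('L') and other ('O') records first, then merge driver ('m') and
    # labels ('l'); any remaining record types sort after these, by payload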
def key(r):
idx = order.find(r[0])
if idx == -1:
return (1, r[1])
else:
return (0, idx)
v1records.sort(key=key)
v2records.sort(key=key)
if not v1records and not v2records:
ui.writenoi18n(b'no merge state found\n')
elif not v2records:
ui.notenoi18n(b'no version 2 merge state\n')
printrecords(1)
elif ms._v1v2match(v1records, v2records):
ui.notenoi18n(b'v1 and v2 states match: using v2\n')
printrecords(2)
else:
ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n')
printrecords(1)
if ui.verbose:
printrecords(2)
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
'''complete "names" - tags, open branch names, bookmark names'''
names = set()
# since we previously only listed open branches, we will handle that
# specially (after this for loop)
for name, ns in pycompat.iteritems(repo.names):
if name != b'branches':
names.update(ns.listnames(repo))
names.update(
tag
for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
if not closed
)
completions = set()
if not args:
args = [b'']
for a in args:
completions.update(n for n in names if n.startswith(a))
ui.write(b'\n'.join(sorted(completions)))
ui.write(b'\n')
@command(
b'debugobsolete',
[
(b'', b'flags', 0, _(b'markers flag')),
(
b'',
b'record-parents',
False,
_(b'record parent information for the precursor'),
),
(b'r', b'rev', [], _(b'display markers relevant to REV')),
(
b'',
b'exclusive',
False,
_(b'restrict display to markers only relevant to REV'),
),
(b'', b'index', False, _(b'display index of the marker')),
(b'', b'delete', [], _(b'delete markers specified by indices')),
]
+ cmdutil.commitopts2
+ cmdutil.formatteropts,
_(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
"""create arbitrary obsolete marker
With no arguments, displays the list of obsolescence markers."""
opts = pycompat.byteskwargs(opts)
def parsenodeid(s):
try:
# We do not use revsingle/revrange functions here to accept
# arbitrary node identifiers, possibly not present in the
# local repository.
n = bin(s)
if len(n) != len(nullid):
raise TypeError()
return n
except TypeError:
raise error.Abort(
b'changeset references must be full hexadecimal '
b'node identifiers'
)
if opts.get(b'delete'):
indices = []
for v in opts.get(b'delete'):
try:
indices.append(int(v))
except ValueError:
raise error.Abort(
_(b'invalid index value: %r') % v,
hint=_(b'use integers for indices'),
)
if repo.currenttransaction():
raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of a transaction.')
)
with repo.lock():
n = repair.deleteobsmarkers(repo.obsstore, indices)
ui.write(_(b'deleted %i obsolescence markers\n') % n)
return
if precursor is not None:
if opts[b'rev']:
raise error.Abort(b'cannot select revision when creating marker')
metadata = {}
metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
succs = tuple(parsenodeid(succ) for succ in successors)
l = repo.lock()
try:
tr = repo.transaction(b'debugobsolete')
try:
date = opts.get(b'date')
if date:
date = dateutil.parsedate(date)
else:
date = None
prec = parsenodeid(precursor)
parents = None
if opts[b'record_parents']:
if prec not in repo.unfiltered():
raise error.Abort(
                            b'cannot use --record-parents on '
b'unknown changesets'
)
parents = repo.unfiltered()[prec].parents()
parents = tuple(p.node() for p in parents)
repo.obsstore.create(
tr,
prec,
succs,
opts[b'flags'],
parents=parents,
date=date,
metadata=metadata,
ui=ui,
)
tr.close()
except ValueError as exc:
raise error.Abort(
_(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
)
finally:
tr.release()
finally:
l.release()
else:
if opts[b'rev']:
revs = scmutil.revrange(repo, opts[b'rev'])
nodes = [repo[r].node() for r in revs]
markers = list(
obsutil.getmarkers(
repo, nodes=nodes, exclusive=opts[b'exclusive']
)
)
markers.sort(key=lambda x: x._data)
else:
markers = obsutil.getmarkers(repo)
markerstoiter = markers
isrelevant = lambda m: True
if opts.get(b'rev') and opts.get(b'index'):
markerstoiter = obsutil.getmarkers(repo)
markerset = set(markers)
isrelevant = lambda m: m in markerset
fm = ui.formatter(b'debugobsolete', opts)
for i, m in enumerate(markerstoiter):
if not isrelevant(m):
# marker can be irrelevant when we're iterating over a set
# of markers (markerstoiter) which is bigger than the set
# of markers we want to display (markers)
# this can happen if both --index and --rev options are
# provided and thus we need to iterate over all of the markers
# to get the correct indices, but only display the ones that
# are relevant to --rev value
continue
fm.startitem()
ind = i if opts.get(b'index') else None
cmdutil.showmarker(fm, m, index=ind)
fm.end()
@command(
b'debugp1copies',
[(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
_(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
"""dump copy information compared to p1"""
opts = pycompat.byteskwargs(opts)
ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
for dst, src in ctx.p1copies().items():
ui.write(b'%s -> %s\n' % (src, dst))
@command(
b'debugp2copies',
[(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
_(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
"""dump copy information compared to p2"""
opts = pycompat.byteskwargs(opts)
ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
for dst, src in ctx.p2copies().items():
ui.write(b'%s -> %s\n' % (src, dst))
@command(
b'debugpathcomplete',
[
(b'f', b'full', None, _(b'complete an entire path')),
(b'n', b'normal', None, _(b'show only normal files')),
(b'a', b'added', None, _(b'show only added files')),
(b'r', b'removed', None, _(b'show only removed files')),
],
_(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
'''complete part or all of a tracked path
This command supports shells that offer path name completion. It
currently completes only files already known to the dirstate.
Completion extends only to the next path segment unless
--full is specified, in which case entire paths are used.'''
def complete(path, acceptable):
dirstate = repo.dirstate
spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
rootdir = repo.root + pycompat.ossep
if spec != repo.root and not spec.startswith(rootdir):
return [], []
if os.path.isdir(spec):
spec += b'/'
spec = spec[len(rootdir) :]
fixpaths = pycompat.ossep != b'/'
if fixpaths:
spec = spec.replace(pycompat.ossep, b'/')
speclen = len(spec)
fullpaths = opts[r'full']
files, dirs = set(), set()
adddir, addfile = dirs.add, files.add
for f, st in pycompat.iteritems(dirstate):
if f.startswith(spec) and st[0] in acceptable:
if fixpaths:
f = f.replace(b'/', pycompat.ossep)
if fullpaths:
addfile(f)
continue
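                # without --full, stop at the next path separator and offer
                # the containing directory as the completion instead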
s = f.find(pycompat.ossep, speclen)
if s >= 0:
adddir(f[:s])
else:
addfile(f)
return files, dirs
acceptable = b''
if opts[r'normal']:
acceptable += b'nm'
if opts[r'added']:
acceptable += b'a'
if opts[r'removed']:
acceptable += b'r'
cwd = repo.getcwd()
if not specs:
specs = [b'.']
files, dirs = set(), set()
for spec in specs:
f, d = complete(spec, acceptable or b'nmar')
files.update(f)
dirs.update(d)
files.update(dirs)
ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
ui.write(b'\n')
@command(
b'debugpathcopies',
cmdutil.walkopts,
b'hg debugpathcopies REV1 REV2 [FILE]',
inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
"""show copies between two revisions"""
ctx1 = scmutil.revsingle(repo, rev1)
ctx2 = scmutil.revsingle(repo, rev2)
m = scmutil.match(ctx1, pats, opts)
for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
ui.write(b'%s -> %s\n' % (src, dst))
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
"""establish a connection to a peer repository"""
# Always enable peer request logging. Requires --debug to display
# though.
overrides = {
(b'devel', b'debug.peer-request'): True,
}
with ui.configoverride(overrides):
peer = hg.peer(ui, {}, path)
local = peer.local() is not None
canpush = peer.canpush()
ui.write(_(b'url: %s\n') % peer.url())
ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
@command(
b'debugpickmergetool',
[
(b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
(b'', b'changedelete', None, _(b'emulate merging change and delete')),
]
+ cmdutil.walkopts
+ cmdutil.mergetoolopts,
_(b'[PATTERN]...'),
inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
"""examine which merge tool is chosen for specified file
    As described in :hg:`help merge-tools`, Mercurial examines
    the configurations below in this order to decide which merge tool is
    chosen for the specified file.
1. ``--tool`` option
2. ``HGMERGE`` environment variable
3. configurations in ``merge-patterns`` section
4. configuration of ``ui.merge``
5. configurations in ``merge-tools`` section
6. ``hgmerge`` tool (for historical reason only)
7. default tool for fallback (``:merge`` or ``:prompt``)
This command writes out examination result in the style below::
FILE = MERGETOOL
By default, all files known in the first parent context of the
working directory are examined. Use file patterns and/or -I/-X
options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.
    With --debug, this command also shows warning messages emitted while
    matching against ``merge-patterns`` and related configuration. It is
    recommended to use this option with explicit file patterns and/or
    -I/-X options, because this option increases the amount of output per
    file according to configurations in hgrc.
With -v/--verbose, this command shows configurations below at
first (only if specified).
- ``--tool`` option
- ``HGMERGE`` environment variable
- configuration of ``ui.merge``
    If the merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information above
    is useful for understanding why a merge tool was chosen.
"""
opts = pycompat.byteskwargs(opts)
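    # Illustrative example: `hg debugpickmergetool --tool :merge3` reports
    # ':merge3' for every examined file, because --tool (step 1 above) takes
    # precedence over all other configuration.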
overrides = {}
if opts[b'tool']:
overrides[(b'ui', b'forcemerge')] = opts[b'tool']
ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
with ui.configoverride(overrides, b'debugmergepatterns'):
hgmerge = encoding.environ.get(b"HGMERGE")
if hgmerge is not None:
ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
uimerge = ui.config(b"ui", b"merge")
if uimerge:
ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
ctx = scmutil.revsingle(repo, opts.get(b'rev'))
m = scmutil.match(ctx, pats, opts)
changedelete = opts[b'changedelete']
for path in ctx.walk(m):
fctx = ctx[path]
try:
if not ui.debugflag:
ui.pushbuffer(error=True)
tool, toolpath = filemerge._picktool(
repo,
ui,
path,
fctx.isbinary(),
b'l' in fctx.flags(),
changedelete,
)
finally:
if not ui.debugflag:
ui.popbuffer()
ui.write(b'%s = %s\n' % (path, tool))
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
'''access the pushkey key/value protocol
With two args, list the keys in the given namespace.
With five args, set a key to new if it currently is set to old.
Reports success or failure.
'''
target = hg.peer(ui, {}, repopath)
if keyinfo:
key, old, new = keyinfo
with target.commandexecutor() as e:
r = e.callcommand(
b'pushkey',
{
b'namespace': namespace,
b'key': key,
b'old': old,
b'new': new,
},
).result()
ui.status(pycompat.bytestr(r) + b'\n')
return not r
else:
for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
ui.write(
b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
)
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
ca = scmutil.revsingle(repo, a)
cb = scmutil.revsingle(repo, b)
pa = pvec.ctxpvec(ca)
pb = pvec.ctxpvec(cb)
if pa == pb:
rel = b"="
elif pa > pb:
rel = b">"
elif pa < pb:
rel = b"<"
elif pa | pb:
rel = b"|"
ui.write(_(b"a: %s\n") % pa)
ui.write(_(b"b: %s\n") % pb)
ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
ui.write(
_(b"delta: %d hdist: %d distance: %d relation: %s\n")
% (
abs(pa._depth - pb._depth),
pvec._hamming(pa._vec, pb._vec),
pa.distance(pb),
rel,
)
)
@command(
b'debugrebuilddirstate|debugrebuildstate',
[
(b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
(
b'',
b'minimal',
None,
_(
b'only rebuild files that are inconsistent with '
b'the working copy parent'
),
),
],
_(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
"""rebuild the dirstate as it would look like for the given revision
    If no revision is specified, the working directory's first parent is used.
The dirstate will be set to the files of the given revision.
The actual working directory content or existing dirstate
information such as adds or removes is not considered.
``minimal`` will only rebuild the dirstate status for files that claim to be
tracked but are not in the parent manifest, or that exist in the parent
manifest but are not in the dirstate. It will not change adds, removes, or
modified files that are in the working copy parent.
One use of this command is to make the next :hg:`status` invocation
check the actual file content.
"""
ctx = scmutil.revsingle(repo, rev)
with repo.wlock():
dirstate = repo.dirstate
changedfiles = None
# See command doc for what minimal does.
if opts.get(r'minimal'):
manifestfiles = set(ctx.manifest().keys())
dirstatefiles = set(dirstate)
manifestonly = manifestfiles - dirstatefiles
dsonly = dirstatefiles - manifestfiles
dsnotadded = set(f for f in dsonly if dirstate[f] != b'a')
changedfiles = manifestonly | dsnotadded
dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
"""rebuild the fncache file"""
repair.rebuildfncache(ui, repo)
@command(
b'debugrename',
[(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
_(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
"""dump rename information"""
opts = pycompat.byteskwargs(opts)
ctx = scmutil.revsingle(repo, opts.get(b'rev'))
m = scmutil.match(ctx, pats, opts)
for abs in ctx.walk(m):
fctx = ctx[abs]
o = fctx.filelog().renamed(fctx.filenode())
rel = repo.pathto(abs)
if o:
ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
else:
ui.write(_(b"%s not renamed\n") % rel)
@command(
b'debugrevlog',
cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
_(b'-c|-m|FILE'),
optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
"""show data and statistics about a revlog"""
opts = pycompat.byteskwargs(opts)
r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
if opts.get(b"dump"):
numrevs = len(r)
ui.write(
(
b"# rev p1rev p2rev start end deltastart base p1 p2"
b" rawsize totalsize compression heads chainlen\n"
)
)
ts = 0
heads = set()
for rev in pycompat.xrange(numrevs):
dbase = r.deltaparent(rev)
if dbase == -1:
dbase = rev
cbase = r.chainbase(rev)
clen = r.chainlen(rev)
p1, p2 = r.parentrevs(rev)
rs = r.rawsize(rev)
ts = ts + rs
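            # maintain the set of heads incrementally: a revision stops being
            # a head as soon as it shows up as somebody's parent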
heads -= set(r.parentrevs(rev))
heads.add(rev)
try:
compression = ts / r.end(rev)
except ZeroDivisionError:
compression = 0
ui.write(
b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
b"%11d %5d %8d\n"
% (
rev,
p1,
p2,
r.start(rev),
r.end(rev),
r.start(dbase),
r.start(cbase),
r.start(p1),
r.start(p2),
rs,
ts,
compression,
len(heads),
clen,
)
)
return 0
v = r.version
format = v & 0xFFFF
flags = []
gdelta = False
if v & revlog.FLAG_INLINE_DATA:
flags.append(b'inline')
if v & revlog.FLAG_GENERALDELTA:
gdelta = True
flags.append(b'generaldelta')
if not flags:
flags = [b'(none)']
### tracks merge vs single parent
nummerges = 0
### tracks ways the "delta" are build
# nodelta
numempty = 0
numemptytext = 0
numemptydelta = 0
# full file content
numfull = 0
# intermediate snapshot against a prior snapshot
numsemi = 0
# snapshot count per depth
numsnapdepth = collections.defaultdict(lambda: 0)
# delta against previous revision
numprev = 0
# delta against first or second parent (not prev)
nump1 = 0
nump2 = 0
# delta against neither prev nor parents
numother = 0
# delta against prev that are also first or second parent
# (details of `numprev`)
nump1prev = 0
nump2prev = 0
# data about delta chain of each revs
chainlengths = []
chainbases = []
chainspans = []
# data about each revision
datasize = [None, 0, 0]
fullsize = [None, 0, 0]
semisize = [None, 0, 0]
# snapshot count per depth
snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
deltasize = [None, 0, 0]
chunktypecounts = {}
chunktypesizes = {}
def addsize(size, l):
if l[0] is None or size < l[0]:
l[0] = size
if size > l[1]:
l[1] = size
l[2] += size
numrevs = len(r)
for rev in pycompat.xrange(numrevs):
p1, p2 = r.parentrevs(rev)
delta = r.deltaparent(rev)
if format > 0:
addsize(r.rawsize(rev), datasize)
if p2 != nullrev:
nummerges += 1
size = r.length(rev)
if delta == nullrev:
chainlengths.append(0)
chainbases.append(r.start(rev))
chainspans.append(size)
if size == 0:
numempty += 1
numemptytext += 1
else:
numfull += 1
numsnapdepth[0] += 1
addsize(size, fullsize)
addsize(size, snapsizedepth[0])
else:
chainlengths.append(chainlengths[delta] + 1)
baseaddr = chainbases[delta]
revaddr = r.start(rev)
chainbases.append(baseaddr)
chainspans.append((revaddr - baseaddr) + size)
if size == 0:
numempty += 1
numemptydelta += 1
elif r.issnapshot(rev):
addsize(size, semisize)
numsemi += 1
depth = r.snapshotdepth(rev)
numsnapdepth[depth] += 1
addsize(size, snapsizedepth[depth])
else:
addsize(size, deltasize)
if delta == rev - 1:
numprev += 1
if delta == p1:
nump1prev += 1
elif delta == p2:
nump2prev += 1
elif delta == p1:
nump1 += 1
elif delta == p2:
nump2 += 1
elif delta != nullrev:
numother += 1
# Obtain data on the raw chunks in the revlog.
if util.safehasattr(r, b'_getsegmentforrevs'):
segment = r._getsegmentforrevs(rev, rev)[1]
else:
segment = r._revlog._getsegmentforrevs(rev, rev)[1]
if segment:
chunktype = bytes(segment[0:1])
else:
chunktype = b'empty'
if chunktype not in chunktypecounts:
chunktypecounts[chunktype] = 0
chunktypesizes[chunktype] = 0
chunktypecounts[chunktype] += 1
chunktypesizes[chunktype] += size
# Adjust size min value for empty cases
for size in (datasize, fullsize, semisize, deltasize):
if size[0] is None:
size[0] = 0
numdeltas = numrevs - numfull - numempty - numsemi
numoprev = numprev - nump1prev - nump2prev
totalrawsize = datasize[2]
datasize[2] /= numrevs
fulltotal = fullsize[2]
if numfull == 0:
fullsize[2] = 0
else:
fullsize[2] /= numfull
semitotal = semisize[2]
snaptotal = {}
if numsemi > 0:
semisize[2] /= numsemi
for depth in snapsizedepth:
snaptotal[depth] = snapsizedepth[depth][2]
snapsizedepth[depth][2] /= numsnapdepth[depth]
deltatotal = deltasize[2]
if numdeltas > 0:
deltasize[2] /= numdeltas
totalsize = fulltotal + semitotal + deltatotal
avgchainlen = sum(chainlengths) / numrevs
maxchainlen = max(chainlengths)
maxchainspan = max(chainspans)
compratio = 1
if totalsize:
compratio = totalrawsize / totalsize
basedfmtstr = b'%%%dd\n'
basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
def dfmtstr(max):
return basedfmtstr % len(str(max))
def pcfmtstr(max, padding=0):
return basepcfmtstr % (len(str(max)), b' ' * padding)
def pcfmt(value, total):
if total:
return (value, 100 * float(value) / total)
else:
return value, 100.0
ui.writenoi18n(b'format : %d\n' % format)
ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
ui.write(b'\n')
fmt = pcfmtstr(totalsize)
fmt2 = dfmtstr(totalsize)
ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
ui.writenoi18n(
b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
)
ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
ui.writenoi18n(
b' text : '
+ fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
)
ui.writenoi18n(
b' delta : '
+ fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
)
ui.writenoi18n(
b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
)
for depth in sorted(numsnapdepth):
ui.write(
(b' lvl-%-3d : ' % depth)
+ fmt % pcfmt(numsnapdepth[depth], numrevs)
)
ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
ui.writenoi18n(
b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
)
for depth in sorted(numsnapdepth):
ui.write(
(b' lvl-%-3d : ' % depth)
+ fmt % pcfmt(snaptotal[depth], totalsize)
)
ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
def fmtchunktype(chunktype):
if chunktype == b'empty':
return b' %s : ' % chunktype
elif chunktype in pycompat.bytestr(string.ascii_letters):
return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
else:
return b' 0x%s : ' % hex(chunktype)
ui.write(b'\n')
ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
for chunktype in sorted(chunktypecounts):
ui.write(fmtchunktype(chunktype))
ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
for chunktype in sorted(chunktypecounts):
ui.write(fmtchunktype(chunktype))
ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
ui.write(b'\n')
fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
if format > 0:
ui.write(b'\n')
ui.writenoi18n(
b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
% tuple(datasize)
)
ui.writenoi18n(
b'full revision size (min/max/avg) : %d / %d / %d\n'
% tuple(fullsize)
)
ui.writenoi18n(
b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
% tuple(semisize)
)
for depth in sorted(snapsizedepth):
if depth == 0:
continue
ui.writenoi18n(
b' level-%-3d (min/max/avg) : %d / %d / %d\n'
% ((depth,) + tuple(snapsizedepth[depth]))
)
ui.writenoi18n(
b'delta size (min/max/avg) : %d / %d / %d\n'
% tuple(deltasize)
)
if numdeltas > 0:
ui.write(b'\n')
fmt = pcfmtstr(numdeltas)
fmt2 = pcfmtstr(numdeltas, 4)
ui.writenoi18n(
b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
)
if numprev > 0:
ui.writenoi18n(
b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
)
ui.writenoi18n(
b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
)
ui.writenoi18n(
b' other : ' + fmt2 % pcfmt(numoprev, numprev)
)
if gdelta:
ui.writenoi18n(
b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
)
ui.writenoi18n(
b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
)
ui.writenoi18n(
b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
)
@command(
b'debugrevlogindex',
cmdutil.debugrevlogopts
+ [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
_(b'[-f FORMAT] -c|-m|FILE'),
optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
"""dump the contents of a revlog index"""
opts = pycompat.byteskwargs(opts)
r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
format = opts.get(b'format', 0)
if format not in (0, 1):
raise error.Abort(_(b"unknown format %d") % format)
if ui.debugflag:
shortfn = hex
else:
shortfn = short
# There might not be anything in r, so have a sane default
idlen = 12
for i in r:
idlen = len(shortfn(r.node(i)))
break
if format == 0:
if ui.verbose:
ui.writenoi18n(
b" rev offset length linkrev %s %s p2\n"
% (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
)
else:
ui.writenoi18n(
b" rev linkrev %s %s p2\n"
% (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
)
elif format == 1:
if ui.verbose:
ui.writenoi18n(
(
b" rev flag offset length size link p1"
b" p2 %s\n"
)
% b"nodeid".rjust(idlen)
)
else:
ui.writenoi18n(
b" rev flag size link p1 p2 %s\n"
% b"nodeid".rjust(idlen)
)
for i in r:
node = r.node(i)
if format == 0:
try:
pp = r.parents(node)
except Exception:
pp = [nullid, nullid]
if ui.verbose:
ui.write(
b"% 6d % 9d % 7d % 7d %s %s %s\n"
% (
i,
r.start(i),
r.length(i),
r.linkrev(i),
shortfn(node),
shortfn(pp[0]),
shortfn(pp[1]),
)
)
else:
ui.write(
b"% 6d % 7d %s %s %s\n"
% (
i,
r.linkrev(i),
shortfn(node),
shortfn(pp[0]),
shortfn(pp[1]),
)
)
elif format == 1:
pr = r.parentrevs(i)
if ui.verbose:
ui.write(
b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
% (
i,
r.flags(i),
r.start(i),
r.length(i),
r.rawsize(i),
r.linkrev(i),
pr[0],
pr[1],
shortfn(node),
)
)
else:
ui.write(
b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
% (
i,
r.flags(i),
r.rawsize(i),
r.linkrev(i),
pr[0],
pr[1],
shortfn(node),
)
)
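# Example invocation (a sketch; the values printed depend on the repository):
# dump the changelog index in the default format, using the rev/linkrev/nodeid/
# p1/p2 columns written by the headers above:
#
#   $ hg debugrevlogindex -c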
@command(
b'debugrevspec',
[
(
b'',
b'optimize',
None,
_(b'print parsed tree after optimizing (DEPRECATED)'),
),
(
b'',
b'show-revs',
True,
_(b'print list of result revisions (default)'),
),
(
b's',
b'show-set',
None,
_(b'print internal representation of result set'),
),
(
b'p',
b'show-stage',
[],
_(b'print parsed tree at the given stage'),
_(b'NAME'),
),
(b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
(b'', b'verify-optimized', False, _(b'verify optimized result')),
],
b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
"""parse and apply a revision specification
Use -p/--show-stage option to print the parsed tree at the given stages.
Use -p all to print tree at every stage.
Use --no-show-revs option with -s or -p to print only the set
representation or the parsed tree respectively.
Use --verify-optimized to compare the optimized result with the unoptimized
one. Returns 1 if the optimized result differs.
"""
opts = pycompat.byteskwargs(opts)
aliases = ui.configitems(b'revsetalias')
stages = [
(b'parsed', lambda tree: tree),
(
b'expanded',
lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
),
(b'concatenated', revsetlang.foldconcat),
(b'analyzed', revsetlang.analyze),
(b'optimized', revsetlang.optimize),
]
if opts[b'no_optimized']:
stages = stages[:-1]
if opts[b'verify_optimized'] and opts[b'no_optimized']:
raise error.Abort(
_(b'cannot use --verify-optimized with --no-optimized')
)
stagenames = set(n for n, f in stages)
showalways = set()
showchanged = set()
if ui.verbose and not opts[b'show_stage']:
# show parsed tree by --verbose (deprecated)
showalways.add(b'parsed')
showchanged.update([b'expanded', b'concatenated'])
if opts[b'optimize']:
showalways.add(b'optimized')
if opts[b'show_stage'] and opts[b'optimize']:
raise error.Abort(_(b'cannot use --optimize with --show-stage'))
if opts[b'show_stage'] == [b'all']:
showalways.update(stagenames)
else:
for n in opts[b'show_stage']:
if n not in stagenames:
raise error.Abort(_(b'invalid stage name: %s') % n)
showalways.update(opts[b'show_stage'])
treebystage = {}
printedtree = None
tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
for n, f in stages:
treebystage[n] = tree = f(tree)
if n in showalways or (n in showchanged and tree != printedtree):
if opts[b'show_stage'] or n != b'parsed':
ui.write(b"* %s:\n" % n)
ui.write(revsetlang.prettyformat(tree), b"\n")
printedtree = tree
if opts[b'verify_optimized']:
arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
brevs = revset.makematcher(treebystage[b'optimized'])(repo)
if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
ui.writenoi18n(
b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
)
ui.writenoi18n(
b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
)
arevs = list(arevs)
brevs = list(brevs)
if arevs == brevs:
return 0
ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
sm = difflib.SequenceMatcher(None, arevs, brevs)
for tag, alo, ahi, blo, bhi in sm.get_opcodes():
if tag in (r'delete', r'replace'):
for c in arevs[alo:ahi]:
ui.write(b'-%d\n' % c, label=b'diff.deleted')
if tag in (r'insert', r'replace'):
for c in brevs[blo:bhi]:
ui.write(b'+%d\n' % c, label=b'diff.inserted')
if tag == r'equal':
for c in arevs[alo:ahi]:
ui.write(b' %d\n' % c)
return 1
func = revset.makematcher(tree)
revs = func(repo)
if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
if not opts[b'show_revs']:
return
for c in revs:
ui.write(b"%d\n" % c)
@command(
b'debugserve',
[
(
b'',
b'sshstdio',
False,
_(b'run an SSH server bound to process handles'),
),
(b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
(b'', b'logiofile', b'', _(b'file to log server I/O to')),
],
b'',
)
def debugserve(ui, repo, **opts):
"""run a server with advanced settings
This command is similar to :hg:`serve`. It exists partially as a
workaround to the fact that ``hg serve --stdio`` must have specific
arguments for security reasons.
"""
opts = pycompat.byteskwargs(opts)
if not opts[b'sshstdio']:
raise error.Abort(_(b'only --sshstdio is currently supported'))
logfh = None
if opts[b'logiofd'] and opts[b'logiofile']:
raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
if opts[b'logiofd']:
# Line buffered because output is line based.
try:
logfh = os.fdopen(int(opts[b'logiofd']), r'ab', 1)
except OSError as e:
if e.errno != errno.ESPIPE:
raise
# can't seek a pipe, so `ab` mode fails on py3
logfh = os.fdopen(int(opts[b'logiofd']), r'wb', 1)
elif opts[b'logiofile']:
logfh = open(opts[b'logiofile'], b'ab', 1)
s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
s.serve_forever()
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
"""manually set the parents of the current working directory
This is useful for writing repository conversion tools, but should
be used with care. For example, neither the working directory nor the
dirstate is updated, so file status may be incorrect after running this
command.
Returns 0 on success.
"""
node1 = scmutil.revsingle(repo, rev1).node()
node2 = scmutil.revsingle(repo, rev2, b'null').node()
with repo.wlock():
repo.setparents(node1, node2)
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
"""dump the side data for a cl/manifest/file revision
Use --verbose to dump the sidedata content."""
opts = pycompat.byteskwargs(opts)
if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
if rev is not None:
raise error.CommandError(b'debugdata', _(b'invalid arguments'))
file_, rev = None, file_
elif rev is None:
raise error.CommandError(b'debugdata', _(b'invalid arguments'))
r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
r = getattr(r, '_revlog', r)
try:
sidedata = r.sidedata(r.lookup(rev))
except KeyError:
raise error.Abort(_(b'invalid revision identifier %s') % rev)
if sidedata:
sidedata = list(sidedata.items())
sidedata.sort()
ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
for key, value in sidedata:
ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
if ui.verbose:
ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
'''test a secure connection to a server
This builds the certificate chain for the server on Windows, installing the
missing intermediates and trusted root via Windows Update if necessary. It
does nothing on other platforms.
If SOURCE is omitted, the 'default' path will be used. If a URL is given,
that server is used. See :hg:`help urls` for more information.
If the update succeeds, retry the original operation. Otherwise, the cause
of the SSL error is likely another issue.
'''
if not pycompat.iswindows:
raise error.Abort(
_(b'certificate chain building is only possible on Windows')
)
if not source:
if not repo:
raise error.Abort(
_(
b"there is no Mercurial repository here, and no "
b"server specified"
)
)
source = b"default"
source, branches = hg.parseurl(ui.expandpath(source))
url = util.url(source)
defaultport = {b'https': 443, b'ssh': 22}
if url.scheme in defaultport:
try:
addr = (url.host, int(url.port or defaultport[url.scheme]))
except ValueError:
raise error.Abort(_(b"malformed port number in URL"))
else:
raise error.Abort(_(b"only https and ssh connections are supported"))
from . import win32
s = ssl.wrap_socket(
socket.socket(),
ssl_version=ssl.PROTOCOL_TLS,
cert_reqs=ssl.CERT_NONE,
ca_certs=None,
)
try:
s.connect(addr)
cert = s.getpeercert(True)
ui.status(_(b'checking the certificate chain for %s\n') % url.host)
complete = win32.checkcertificatechain(cert, build=False)
if not complete:
ui.status(_(b'certificate chain is incomplete, updating... '))
if not win32.checkcertificatechain(cert):
ui.status(_(b'failed.\n'))
else:
ui.status(_(b'done.\n'))
else:
ui.status(_(b'full certificate chain is available\n'))
finally:
s.close()
@command(
b'debugsub',
[(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
_(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
ctx = scmutil.revsingle(repo, rev, None)
for k, v in sorted(ctx.substate.items()):
ui.writenoi18n(b'path %s\n' % k)
ui.writenoi18n(b' source %s\n' % v[0])
ui.writenoi18n(b' revision %s\n' % v[1])
@command(
b'debugsuccessorssets',
[(b'', b'closest', False, _(b'return closest successors sets only'))],
_(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
"""show set of successors for revision
A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless the closest
    successors set option is set.
In most cases a changeset A has a single successors set containing a single
successor (changeset A replaced by A').
    A changeset that is made obsolete with no successors is called "pruned".
Such changesets have no successors sets at all.
A changeset that has been "split" will have a successors set containing
more than one successor.
A changeset that has been rewritten in multiple different ways is called
"divergent". Such changesets have multiple successor sets (each of which
may also be split, i.e. have multiple successors).
Results are displayed as follows::
<rev1>
<successors-1A>
<rev2>
<successors-2A>
<successors-2B1> <successors-2B2> <successors-2B3>
Here rev2 has two possible (i.e. divergent) successors sets. The first
holds one element, whereas the second holds three (i.e. the changeset has
been split).
"""
# passed to successorssets caching computation from one call to another
cache = {}
ctx2str = bytes
node2str = short
for rev in scmutil.revrange(repo, revs):
ctx = repo[rev]
ui.write(b'%s\n' % ctx2str(ctx))
for succsset in obsutil.successorssets(
repo, ctx.node(), closest=opts[r'closest'], cache=cache
):
if succsset:
ui.write(b' ')
ui.write(node2str(succsset[0]))
for node in succsset[1:]:
ui.write(b' ')
ui.write(node2str(node))
ui.write(b'\n')
@command(
b'debugtemplate',
[
(b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
(b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
],
_(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
"""parse and apply a template
If -r/--rev is given, the template is processed as a log template and
applied to the given changesets. Otherwise, it is processed as a generic
template.
Use --verbose to print the parsed tree.
"""
revs = None
if opts[r'rev']:
if repo is None:
raise error.RepoError(
_(b'there is no Mercurial repository here (.hg not found)')
)
revs = scmutil.revrange(repo, opts[r'rev'])
props = {}
for d in opts[r'define']:
try:
k, v = (e.strip() for e in d.split(b'=', 1))
if not k or k == b'ui':
raise ValueError
props[k] = v
except ValueError:
raise error.Abort(_(b'malformed keyword definition: %s') % d)
if ui.verbose:
aliases = ui.configitems(b'templatealias')
tree = templater.parse(tmpl)
ui.note(templater.prettyformat(tree), b'\n')
newtree = templater.expandaliases(tree, aliases)
if newtree != tree:
ui.notenoi18n(
b"* expanded:\n", templater.prettyformat(newtree), b'\n'
)
if revs is None:
tres = formatter.templateresources(ui, repo)
t = formatter.maketemplater(ui, tmpl, resources=tres)
if ui.verbose:
kwds, funcs = t.symbolsuseddefault()
ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
ui.write(t.renderdefault(props))
else:
displayer = logcmdutil.maketemplater(ui, repo, tmpl)
if ui.verbose:
kwds, funcs = displayer.t.symbolsuseddefault()
ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
for r in revs:
displayer.show(repo[r], **pycompat.strkwargs(props))
displayer.close()
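# Example invocations (sketches; the keywords and filters shown are standard
# log template symbols): render a log-style template for the working parent,
# or evaluate a generic template with a user-defined keyword:
#
#   $ hg debugtemplate -r . '{rev}:{node|short} {desc|firstline}\n'
#   $ hg debugtemplate -D greeting=hello '{greeting}\n'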
@command(
b'debuguigetpass',
[(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
_(b'[-p TEXT]'),
norepo=True,
)
def debuguigetpass(ui, prompt=b''):
"""show prompt to type password"""
r = ui.getpass(prompt)
    ui.writenoi18n(b'response: %s\n' % r)
@command(
b'debuguiprompt',
[(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
_(b'[-p TEXT]'),
norepo=True,
)
def debuguiprompt(ui, prompt=b''):
"""show plain prompt"""
r = ui.prompt(prompt)
ui.writenoi18n(b'response: %s\n' % r)
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
"""warm all known caches in the repository"""
with repo.wlock(), repo.lock():
repo.updatecaches(full=True)
@command(
b'debugupgraderepo',
[
(
b'o',
b'optimize',
[],
_(b'extra optimization to perform'),
_(b'NAME'),
),
(b'', b'run', False, _(b'performs an upgrade')),
(b'', b'backup', True, _(b'keep the old repository content around')),
(b'', b'changelog', None, _(b'select the changelog for upgrade')),
(b'', b'manifest', None, _(b'select the manifest for upgrade')),
],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
"""upgrade a repository to use different features
If no arguments are specified, the repository is evaluated for upgrade
and a list of problems and potential optimizations is printed.
With ``--run``, a repository upgrade is performed. Behavior of the upgrade
can be influenced via additional arguments. More details will be provided
by the command output when run without ``--run``.
During the upgrade, the repository will be locked and no writes will be
allowed.
At the end of the upgrade, the repository may not be readable while new
repository data is swapped in. This window will be as long as it takes to
rename some directories inside the ``.hg`` directory. On most machines, this
should complete almost instantaneously and the chances of a consumer being
unable to access the repository should be low.
    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:
* `--manifest`: only optimize the manifest
* `--no-manifest`: optimize all revlog but the manifest
* `--changelog`: optimize the changelog only
* `--no-changelog --no-manifest`: optimize filelogs only
"""
return upgrade.upgraderepo(
ui, repo, run=run, optimize=optimize, backup=backup, **opts
)
@command(
b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
"""show how files match on given patterns"""
opts = pycompat.byteskwargs(opts)
m = scmutil.match(repo[None], pats, opts)
if ui.verbose:
ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
items = list(repo[None].walk(m))
if not items:
return
f = lambda fn: fn
if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
f = lambda fn: util.normpath(fn)
fmt = b'f %%-%ds %%-%ds %%s' % (
max([len(abs) for abs in items]),
max([len(repo.pathto(abs)) for abs in items]),
)
for abs in items:
line = fmt % (
abs,
f(repo.pathto(abs)),
m.exact(abs) and b'exact' or b'',
)
ui.write(b"%s\n" % line.rstrip())
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
"""explain instabilities of a changeset"""
for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
dnodes = b''
if entry.get(b'divergentnodes'):
dnodes = (
b' '.join(
b'%s (%s)' % (ctx.hex(), ctx.phasestr())
for ctx in entry[b'divergentnodes']
)
+ b' '
)
ui.write(
b'%s: %s%s %s\n'
% (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
)
@command(
b'debugwireargs',
[
(b'', b'three', b'', b'three'),
(b'', b'four', b'', b'four'),
(b'', b'five', b'', b'five'),
]
+ cmdutil.remoteopts,
_(b'REPO [OPTIONS]... [ONE [TWO]]'),
norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
opts = pycompat.byteskwargs(opts)
repo = hg.peer(ui, opts, repopath)
for opt in cmdutil.remoteopts:
del opts[opt[1]]
args = {}
for k, v in pycompat.iteritems(opts):
if v:
args[k] = v
args = pycompat.strkwargs(args)
# run twice to check that we don't mess up the stream for the next command
res1 = repo.debugwireargs(*vals, **args)
res2 = repo.debugwireargs(*vals, **args)
ui.write(b"%s\n" % res1)
if res1 != res2:
ui.warn(b"%s\n" % res2)
def _parsewirelangblocks(fh):
activeaction = None
blocklines = []
lastindent = 0
for line in fh:
line = line.rstrip()
if not line:
continue
if line.startswith(b'#'):
continue
if not line.startswith(b' '):
# New block. Flush previous one.
if activeaction:
yield activeaction, blocklines
activeaction = line
blocklines = []
lastindent = 0
continue
# Else we start with an indent.
if not activeaction:
raise error.Abort(_(b'indented line outside of block'))
indent = len(line) - len(line.lstrip())
# If this line is indented more than the last line, concatenate it.
if indent > lastindent and blocklines:
blocklines[-1] += line.lstrip()
else:
blocklines.append(line)
lastindent = indent
# Flush last block.
if activeaction:
yield activeaction, blocklines
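# Example input for the parser above (a sketch): two action blocks, the second
# carrying one argument line; a line indented more deeply than the previous one
# would be concatenated onto it instead of starting a new block line:
#
#   command heads
#   command listkeys
#       namespace bookmarks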
@command(
b'debugwireproto',
[
(b'', b'localssh', False, _(b'start an SSH server for this repo')),
(b'', b'peer', b'', _(b'construct a specific version of the peer')),
(
b'',
b'noreadstderr',
False,
_(b'do not read from stderr of the remote'),
),
(
b'',
b'nologhandshake',
False,
_(b'do not log I/O related to the peer handshake'),
),
]
+ cmdutil.remoteopts,
_(b'[PATH]'),
optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
"""send wire protocol commands to a server
This command can be used to issue wire protocol commands to remote
peers and to debug the raw data being exchanged.
``--localssh`` will start an SSH server against the current repository
and connect to that. By default, the connection will perform a handshake
and establish an appropriate peer instance.
``--peer`` can be used to bypass the handshake protocol and construct a
peer instance using the specified class type. Valid values are ``raw``,
``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
raw data payloads and don't support higher-level command actions.
``--noreadstderr`` can be used to disable automatic reading from stderr
of the peer (for SSH connections only). Disabling automatic reading of
stderr is useful for making output more deterministic.
Commands are issued via a mini language which is specified via stdin.
The language consists of individual actions to perform. An action is
defined by a block. A block is defined as a line with no leading
space followed by 0 or more lines with leading space. Blocks are
effectively a high-level command with additional metadata.
Lines beginning with ``#`` are ignored.
The following sections denote available actions.
raw
---
Send raw data to the server.
The block payload contains the raw data to send as one atomic send
operation. The data may not actually be delivered in a single system
call: it depends on the abilities of the transport being used.
Each line in the block is de-indented and concatenated. Then, that
value is evaluated as a Python b'' literal. This allows the use of
backslash escaping, etc.
raw+
----
Behaves like ``raw`` except flushes output afterwards.
command <X>
-----------
Send a request to run a named command, whose name follows the ``command``
string.
Arguments to the command are defined as lines in this block. The format of
each line is ``<key> <value>``. e.g.::
command listkeys
namespace bookmarks
If the value begins with ``eval:``, it will be interpreted as a Python
literal expression. Otherwise values are interpreted as Python b'' literals.
This allows sending complex types and encoding special byte sequences via
backslash escaping.
The following arguments have special meaning:
``PUSHFILE``
When defined, the *push* mechanism of the peer will be used instead
of the static request-response mechanism and the content of the
file specified in the value of this argument will be sent as the
command payload.
This can be used to submit a local bundle file to the remote.
batchbegin
----------
Instruct the peer to begin a batched send.
All ``command`` blocks are queued for execution until the next
``batchsubmit`` block.
batchsubmit
-----------
Submit previously queued ``command`` blocks as a batch request.
This action MUST be paired with a ``batchbegin`` action.
httprequest <method> <path>
---------------------------
(HTTP peer only)
Send an HTTP request to the peer.
The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
headers to add to the request. e.g. ``Accept: foo``.
The following arguments are special:
``BODYFILE``
The content of the file defined as the value to this argument will be
transferred verbatim as the HTTP request body.
``frame <type> <flags> <payload>``
Send a unified protocol frame as part of the request body.
All frames will be collected and sent as the body to the HTTP
request.
close
-----
Close the connection to the server.
flush
-----
Flush data written to the server.
readavailable
-------------
Close the write end of the connection and read all available data from
the server.
If the connection to the server encompasses multiple pipes, we poll both
pipes and read available data.
readline
--------
Read a line of output from the server. If there are multiple output
pipes, reads only the main pipe.
ereadline
---------
Like ``readline``, but read from the stderr pipe, if available.
read <X>
--------
``read()`` N bytes from the server's main output pipe.
eread <X>
---------
``read()`` N bytes from the server's stderr pipe, if available.
Specifying Unified Frame-Based Protocol Frames
----------------------------------------------
It is possible to emit a *Unified Frame-Based Protocol* by using special
syntax.
A frame is composed as a type, flags, and payload. These can be parsed
from a string of the form:
<request-id> <stream-id> <stream-flags> <type> <flags> <payload>
``request-id`` and ``stream-id`` are integers defining the request and
stream identifiers.
``type`` can be an integer value for the frame type or the string name
of the type. The strings are defined in ``wireprotoframing.py``. e.g.
``command-name``.
``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
components. Each component (and there can be just one) can be an integer
or a flag name for stream flags or frame flags, respectively. Values are
resolved to integers and then bitwise OR'd together.
``payload`` represents the raw frame payload. If it begins with
``cbor:``, the following string is evaluated as Python code and the
resulting object is fed into a CBOR encoder. Otherwise it is interpreted
as a Python byte string literal.
"""
opts = pycompat.byteskwargs(opts)
if opts[b'localssh'] and not repo:
raise error.Abort(_(b'--localssh requires a repository'))
if opts[b'peer'] and opts[b'peer'] not in (
b'raw',
b'http2',
b'ssh1',
b'ssh2',
):
raise error.Abort(
_(b'invalid value for --peer'),
            hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
)
if path and opts[b'localssh']:
raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
if ui.interactive():
ui.write(_(b'(waiting for commands on stdin)\n'))
blocks = list(_parsewirelangblocks(ui.fin))
proc = None
stdin = None
stdout = None
stderr = None
opener = None
if opts[b'localssh']:
# We start the SSH server in its own process so there is process
# separation. This prevents a whole class of potential bugs around
# shared state from interfering with server operation.
args = procutil.hgcmd() + [
b'-R',
repo.root,
b'debugserve',
b'--sshstdio',
]
proc = subprocess.Popen(
pycompat.rapply(procutil.tonativestr, args),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
stdin = proc.stdin
stdout = proc.stdout
stderr = proc.stderr
# We turn the pipes into observers so we can log I/O.
if ui.verbose or opts[b'peer'] == b'raw':
stdin = util.makeloggingfileobject(
ui, proc.stdin, b'i', logdata=True
)
stdout = util.makeloggingfileobject(
ui, proc.stdout, b'o', logdata=True
)
stderr = util.makeloggingfileobject(
ui, proc.stderr, b'e', logdata=True
)
# --localssh also implies the peer connection settings.
url = b'ssh://localserver'
autoreadstderr = not opts[b'noreadstderr']
if opts[b'peer'] == b'ssh1':
ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
peer = sshpeer.sshv1peer(
ui,
url,
proc,
stdin,
stdout,
stderr,
None,
autoreadstderr=autoreadstderr,
)
elif opts[b'peer'] == b'ssh2':
ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
peer = sshpeer.sshv2peer(
ui,
url,
proc,
stdin,
stdout,
stderr,
None,
autoreadstderr=autoreadstderr,
)
elif opts[b'peer'] == b'raw':
ui.write(_(b'using raw connection to peer\n'))
peer = None
else:
ui.write(_(b'creating ssh peer from handshake results\n'))
peer = sshpeer.makepeer(
ui,
url,
proc,
stdin,
stdout,
stderr,
autoreadstderr=autoreadstderr,
)
elif path:
# We bypass hg.peer() so we can proxy the sockets.
# TODO consider not doing this because we skip
# ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
u = util.url(path)
if u.scheme != b'http':
raise error.Abort(_(b'only http:// paths are currently supported'))
url, authinfo = u.authinfo()
openerargs = {
r'useragent': b'Mercurial debugwireproto',
}
# Turn pipes/sockets into observers so we can log I/O.
if ui.verbose:
openerargs.update(
{
r'loggingfh': ui,
r'loggingname': b's',
r'loggingopts': {r'logdata': True, r'logdataapis': False,},
}
)
if ui.debugflag:
openerargs[r'loggingopts'][r'logdataapis'] = True
# Don't send default headers when in raw mode. This allows us to
# bypass most of the behavior of our URL handling code so we can
# have near complete control over what's sent on the wire.
if opts[b'peer'] == b'raw':
openerargs[r'sendaccept'] = False
opener = urlmod.opener(ui, authinfo, **openerargs)
if opts[b'peer'] == b'http2':
ui.write(_(b'creating http peer for wire protocol version 2\n'))
# We go through makepeer() because we need an API descriptor for
# the peer instance to be useful.
with ui.configoverride(
{(b'experimental', b'httppeer.advertise-v2'): True}
):
if opts[b'nologhandshake']:
ui.pushbuffer()
peer = httppeer.makepeer(ui, path, opener=opener)
if opts[b'nologhandshake']:
ui.popbuffer()
if not isinstance(peer, httppeer.httpv2peer):
raise error.Abort(
_(
b'could not instantiate HTTP peer for '
b'wire protocol version 2'
),
hint=_(
b'the server may not have the feature '
b'enabled or is not allowing this '
b'client version'
),
)
elif opts[b'peer'] == b'raw':
ui.write(_(b'using raw connection to peer\n'))
peer = None
elif opts[b'peer']:
raise error.Abort(
_(b'--peer %s not supported with HTTP peers') % opts[b'peer']
)
else:
peer = httppeer.makepeer(ui, path, opener=opener)
# We /could/ populate stdin/stdout with sock.makefile()...
else:
raise error.Abort(_(b'unsupported connection configuration'))
batchedcommands = None
# Now perform actions based on the parsed wire language instructions.
for action, lines in blocks:
if action in (b'raw', b'raw+'):
if not stdin:
raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
# Concatenate the data together.
data = b''.join(l.lstrip() for l in lines)
data = stringutil.unescapestr(data)
stdin.write(data)
if action == b'raw+':
stdin.flush()
elif action == b'flush':
if not stdin:
raise error.Abort(_(b'cannot call flush on this peer'))
stdin.flush()
elif action.startswith(b'command'):
if not peer:
raise error.Abort(
_(
b'cannot send commands unless peer instance '
b'is available'
)
)
command = action.split(b' ', 1)[1]
args = {}
for line in lines:
# We need to allow empty values.
fields = line.lstrip().split(b' ', 1)
if len(fields) == 1:
key = fields[0]
value = b''
else:
key, value = fields
if value.startswith(b'eval:'):
value = stringutil.evalpythonliteral(value[5:])
else:
value = stringutil.unescapestr(value)
args[key] = value
if batchedcommands is not None:
batchedcommands.append((command, args))
continue
ui.status(_(b'sending %s command\n') % command)
if b'PUSHFILE' in args:
with open(args[b'PUSHFILE'], r'rb') as fh:
del args[b'PUSHFILE']
res, output = peer._callpush(
command, fh, **pycompat.strkwargs(args)
)
ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
ui.status(
_(b'remote output: %s\n') % stringutil.escapestr(output)
)
else:
with peer.commandexecutor() as e:
res = e.callcommand(command, args).result()
if isinstance(res, wireprotov2peer.commandresponse):
val = res.objects()
ui.status(
_(b'response: %s\n')
% stringutil.pprint(val, bprefix=True, indent=2)
)
else:
ui.status(
_(b'response: %s\n')
% stringutil.pprint(res, bprefix=True, indent=2)
)
elif action == b'batchbegin':
if batchedcommands is not None:
raise error.Abort(_(b'nested batchbegin not allowed'))
batchedcommands = []
elif action == b'batchsubmit':
# There is a batching API we could go through. But it would be
# difficult to normalize requests into function calls. It is easier
# to bypass this layer and normalize to commands + args.
ui.status(
_(b'sending batch with %d sub-commands\n')
% len(batchedcommands)
)
for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
ui.status(
_(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
)
batchedcommands = None
elif action.startswith(b'httprequest '):
if not opener:
raise error.Abort(
_(b'cannot use httprequest without an HTTP peer')
)
request = action.split(b' ', 2)
if len(request) != 3:
raise error.Abort(
_(
b'invalid httprequest: expected format is '
b'"httprequest <method> <path>'
)
)
method, httppath = request[1:]
headers = {}
body = None
frames = []
for line in lines:
line = line.lstrip()
m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
if m:
# Headers need to use native strings.
key = pycompat.strurl(m.group(1))
value = pycompat.strurl(m.group(2))
headers[key] = value
continue
                if line.startswith(b'BODYFILE '):
                    # the file name is the part after the BODYFILE keyword
                    with open(line.split(b' ', 1)[1], b'rb') as fh:
                        body = fh.read()
elif line.startswith(b'frame '):
frame = wireprotoframing.makeframefromhumanstring(
line[len(b'frame ') :]
)
frames.append(frame)
else:
raise error.Abort(
_(b'unknown argument to httprequest: %s') % line
)
url = path + httppath
if frames:
body = b''.join(bytes(f) for f in frames)
req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
# urllib.Request insists on using has_data() as a proxy for
# determining the request method. Override that to use our
# explicitly requested method.
req.get_method = lambda: pycompat.sysstr(method)
try:
res = opener.open(req)
body = res.read()
except util.urlerr.urlerror as e:
# read() method must be called, but only exists in Python 2
getattr(e, 'read', lambda: None)()
continue
ct = res.headers.get(r'Content-Type')
if ct == r'application/mercurial-cbor':
ui.write(
_(b'cbor> %s\n')
% stringutil.pprint(
cborutil.decodeall(body), bprefix=True, indent=2
)
)
elif action == b'close':
peer.close()
elif action == b'readavailable':
if not stdout or not stderr:
raise error.Abort(
_(b'readavailable not available on this peer')
)
stdin.close()
stdout.read()
stderr.read()
elif action == b'readline':
if not stdout:
raise error.Abort(_(b'readline not available on this peer'))
stdout.readline()
elif action == b'ereadline':
if not stderr:
raise error.Abort(_(b'ereadline not available on this peer'))
stderr.readline()
elif action.startswith(b'read '):
count = int(action.split(b' ', 1)[1])
if not stdout:
raise error.Abort(_(b'read not available on this peer'))
stdout.read(count)
elif action.startswith(b'eread '):
count = int(action.split(b' ', 1)[1])
if not stderr:
raise error.Abort(_(b'eread not available on this peer'))
stderr.read(count)
else:
raise error.Abort(_(b'unknown action: %s') % action)
if batchedcommands is not None:
raise error.Abort(_(b'unclosed "batchbegin" request'))
if peer:
peer.close()
if proc:
proc.kill()
| [
"[email protected]"
] | |
fa18fc848793dbb222ee7e14bd3c6903ea911e47 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_372/ch86_2020_06_22_18_23_28_347535.py | 837644fabd4e62704e1139de954c6abf35446f8e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py |
with open('dados.csv', 'r') as arquivo_csv:
conteudo = arquivo_csv.read()
arquivo_tsv = conteudo.replace(',', '\t')  # TSV uses a tab separator, not a space
with open('dados.tsv', 'w') as arquivo:
arquivo.write(arquivo_tsv) | [
"[email protected]"
] | |
eb7bb63998735e104a61f2fe50aa634618a4affd | 5051b6731817e10b841fc0de78b4e68b47e2b6e2 | /retina_reinforcement_sim/scripts/baxter_visualise_cnn.py | b9148920c4eef19140b8db19e4abfa3fca0055c3 | [] | no_license | lewisboyd/MsciProject | f5123a45b969effde56bf1fe34c473f9b522c59d | 6dcb04e79f776fc780b843208e2c689578c94bb3 | refs/heads/master | 2020-04-05T21:46:25.783510 | 2019-04-16T15:25:24 | 2019-04-16T15:25:24 | 157,232,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,048 | py | #!/usr/bin/env python
import os
import cv2
import numpy as np
import torch
from model import ResNet10
def get_img(base_dir, id):
"""Index dataset to get image."""
img_name = base_dir + "img" + str(id) + ".png"
img = cv2.imread(img_name)
# Convert to float and rescale to [0,1]
img = img.astype(np.float32) / 255
return img
if __name__ == '__main__':
# Load and save paths
STATES_TENSOR = (os.path.dirname(
os.path.realpath(__file__)) + "/baxter_center/image_data/states")
IMG_FOLDER = (os.path.dirname(
os.path.realpath(__file__)) + "/baxter_center/image_data/images/")
RET_FOLDER = (os.path.dirname(
os.path.realpath(__file__)) + "/baxter_center/image_data/retina_images/")
STATE_DICT_IMG = (os.path.dirname(
os.path.realpath(__file__)) + "/baxter_center/sr/state_dicts/net_50")
STATE_DICT_RET = (os.path.dirname(
os.path.realpath(__file__)) + "/baxter_center/sr_retina/state_dicts/net_50")
OUT_FOLDER_RET = (os.path.dirname(
os.path.realpath(__file__)) + "/baxter_center/examples/sr/")
OUT_FOLDER_IMG = (os.path.dirname(
os.path.realpath(__file__)) + "/baxter_center/examples/sr_retina/")
OUT_FOLDER_GROUND = (os.path.dirname(
os.path.realpath(__file__)) + "/baxter_center/examples/ground/")
# Index of images to process
INDICES = np.arange(0, 1000, 10)
STATES = torch.load(STATES_TENSOR)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Get object locations using normal images
srnet = ResNet10(2).to(device).eval()
srnet.load_state_dict(torch.load(STATE_DICT_IMG))
srnet_img_locs = []
for index in INDICES:
img = get_img(IMG_FOLDER, index)
img = cv2.resize(img, None, fx=0.7, fy=0.7,
interpolation=cv2.INTER_AREA)
img = torch.tensor(img, dtype=torch.float,
device=device).permute(2, 0, 1).unsqueeze(0)
with torch.no_grad():
loc = srnet(img)
srnet_img_locs.append(loc.cpu().numpy()[0])
# Get object locations using retina images
srnet.load_state_dict(torch.load(STATE_DICT_RET))
srnet_retina_locs = []
for index in INDICES:
img = get_img(RET_FOLDER, index)
img = cv2.resize(img, None, fx=0.7, fy=0.7,
interpolation=cv2.INTER_AREA)
img = torch.tensor(img, dtype=torch.float,
device=device).permute(2, 0, 1).unsqueeze(0)
with torch.no_grad():
loc = srnet(img)
srnet_retina_locs.append(loc.cpu().numpy()[0])
# Get ground truth locations
ground_truths = []
for index in INDICES:
ground_truths.append(STATES[index].numpy())
    # Create output folders first so the image writes below do not silently fail
    if not os.path.isdir(OUT_FOLDER_IMG):
        os.makedirs(OUT_FOLDER_IMG)
    if not os.path.isdir(OUT_FOLDER_RET):
        os.makedirs(OUT_FOLDER_RET)
    if not os.path.isdir(OUT_FOLDER_GROUND):
        os.makedirs(OUT_FOLDER_GROUND)
    # Draw circles at predicted/ground locations and save images
for i, index in enumerate(INDICES):
# Rescale from [-1, 1] to [0, 2]
ground_truths[i] = ground_truths[i] + 1
srnet_img_locs[i] = srnet_img_locs[i] + 1
srnet_retina_locs[i] = srnet_retina_locs[i] + 1
# Multiply by half img height and width to get pixel location
ground_truths[i][0] = ground_truths[i][0] * 234
ground_truths[i][1] = ground_truths[i][1] * 123
srnet_img_locs[i][0] = srnet_img_locs[i][0] * 234
srnet_img_locs[i][1] = srnet_img_locs[i][1] * 123
srnet_retina_locs[i][0] = srnet_retina_locs[i][0] * 234
srnet_retina_locs[i][1] = srnet_retina_locs[i][1] * 123
# Draw circle at ground truth location and save
img = get_img(IMG_FOLDER, index)
        loc = (int(ground_truths[i][0]), int(ground_truths[i][1]))  # cv2.circle needs integer pixel coordinates
img = cv2.circle(img, loc, 5, (0, 255, 0), -1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imwrite(OUT_FOLDER_GROUND + "img" + str(index) + ".png", img * 255)
# Draw circle at location predicted using normal images and save
img = get_img(IMG_FOLDER, index)
        loc = (int(srnet_img_locs[i][0]), int(srnet_img_locs[i][1]))  # cv2.circle needs integer pixel coordinates
cv2.circle(img, loc, 5, (0, 255, 0), -1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imwrite(OUT_FOLDER_IMG + "img" + str(index) + ".png", img * 255)
# Draw circle at location predicted using retina images and save
img = get_img(IMG_FOLDER, index)
        loc = (int(srnet_retina_locs[i][0]), int(srnet_retina_locs[i][1]))  # cv2.circle needs integer pixel coordinates
cv2.circle(img, loc, 5, (0, 255, 0), -1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imwrite(OUT_FOLDER_RET + "img" + str(index) + ".png", img * 255)
# Save pixel locations
np.save(OUT_FOLDER_IMG + "pixel_locations", srnet_img_locs)
np.save(OUT_FOLDER_RET + "pixel_locations", srnet_retina_locs)
np.save(OUT_FOLDER_GROUND + "pixel_locations", ground_truths)
| [
"[email protected]"
] | |
6ac5efc7cfea20688469963535be7345951657ae | cf051b16272f67d20a4ae733d6078360964674f6 | /src/community/admin.py | b52913bf5a41553861a889727bf4720af59b0ff3 | [] | no_license | aekysong/gradproj-server | 2ca2e71d882dbe446123c4354f071e8e9a8a3025 | 5b9914ec751f1add1bd50abdf9ce572e05655a8d | refs/heads/master | 2022-06-24T04:23:31.919631 | 2020-05-07T15:32:10 | 2020-05-07T15:32:10 | 257,213,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | from django.contrib import admin
from .models import Post, Comment, Notice
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Notice) | [
"[email protected]"
] | |
53bc52abece6c7a1cac6a1f43df4b6aa33020d12 | 3b7805468c760ab02239d57e06ba8f1308763170 | /lab0809-starter_zx996-master/src/CarRentalSystem.py | 05a0c0d4f33dbf8f851d6d9047c69102c18b5555 | [] | no_license | zx996/-cs1531 | 24f04e34290fa777c409a9998b754e2608f81884 | 9875527233b58ac33ce484c79ac77d63daded8a2 | refs/heads/master | 2020-03-24T06:16:35.300442 | 2018-09-28T06:25:01 | 2018-09-28T06:25:01 | 142,522,633 | 0 | 0 | null | 2018-07-30T09:58:26 | 2018-07-27T03:23:39 | Python | UTF-8 | Python | false | false | 2,107 | py | from src.Booking import Booking
import copy
class CarRentalSystem:
def __init__(self, admin_system, auth_manager):
self._cars = []
self._customers = []
self._bookings = []
self._admin_system = admin_system
self._auth_manager = auth_manager
'''
Query Processing Services
'''
    def car_search(self, name='', model=''):
return [i for i in self._cars
if(name.lower() in i.name.lower() and model.lower() in i.model.lower())
]
def get_user_by_id(self, user_id):
for c in self._customers:
if c.get_id() == user_id:
return c
return self._admin_system.get_user_by_id(user_id)
def get_car(self, rego):
for c in self.cars:
if c.rego == rego:
return c
return None
'''
Booking Services
'''
def make_booking(self, customer, period, car, location):
# Prevent the customer from referencing 'current_user';
# otherwise the customer recorded in each booking will be modified to
# a different user whenever the current_user changes (i.e. when new user logs-in)
customer = copy.copy(customer)
new_booking = Booking(customer, period, car, location)
self._bookings.append(new_booking)
car.add_booking(new_booking)
return new_booking
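    # Usage sketch (hypothetical collaborating objects): a booking links the
    # customer, rental period, car and pickup location, and is also recorded
    # on the car via car.add_booking():
    #
    #   booking = rental_system.make_booking(customer, period, car, "Sydney")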
'''
Registration Services
'''
def add_car(self, car):
self._cars.append(car)
def add_customer(self, customer):
self._customers.append(customer)
'''
Login Services
'''
def login_customer(self, username, password):
for customer in self._customers:
if self._auth_manager.login(customer, username, password):
return True
return False
def login_admin(self, username, password):
return self._admin_system.login(username, password)
'''
Properties
'''
@property
def cars(self):
return self._cars
@property
def bookings(self):
return self._bookings
| [
"[email protected]"
] | |
4f8be92e793a6fae937e3a112d7e14efd308ca24 | 6c2608bc87b522da77c792e20330989de17b3005 | /chap-5/ex124.py | c6628121f54b5fb10fe1c70ca49bc0d805737d78 | [] | no_license | AleByron/AleByron-The-Python-Workbook-second-edition | 8a0b408c1bbd90c82e6b837fc898ee10341ca8fa | 491b2fd394aa04e29a4b2dbe9a615c547e239028 | refs/heads/main | 2023-01-13T21:01:17.757669 | 2020-11-11T01:29:28 | 2020-11-11T01:29:28 | 306,487,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | x = str(input("Enter the x coordinate of your first point: \n"))
y = str(input("Enter the y coordinate of your first point:\n"))
points = []
points.append(x)
points.append(y)
z = 0
m1 = 0 # sum of x*y
mx2 = 0 # sum of x
my2 = 0 # sum of y
m2 = 0 # product of sum(x) and sum(y) divided by n
m3 = 0 # sum of x**2
m4 = 0 # ((sum of x)**2)/n
sy = 0 # sum of y
sx = 0 # sum of x
yv = 0 # average y value
xv = 0 # average x value
while points[z]!='':
x = str(input('Enter the x coordinate of another point:\n'))
if x == '':
break
y = str(input('Enter the y coordinate of another point:\n'))
points.append(x)
points.append(y)
z = z+2
z = 0
while len(points)>z:
points[z]=float(points[z])
z = z+1
z = 0
while len(points) > z:
a = points[z]*points[z+1]
m1 = m1 + a
z = z+2
z = 0
while len(points) > z:
mx2 = mx2 + points[z]
my2 = my2 +points[z+1]
z = z+2
m2 = (mx2*my2)/(len(points)/2)
z = 0
while len(points) > z:
m3 = m3 + points[z]**2
z = z+2
m4 = ((mx2)**2)/(len(points)/2)
m = (m1 - m2)/(m3-m4)
z = 0
while len(points)>z:
sx = sx + points[z]
sy = sy + points[z+1]
z = z+2
xv = sx/(len(points)/2)
yv = sy/(len(points)/2)
b = yv - m*xv # intercept from the means: b = mean(y) - m * mean(x)
print( 'With your data y=',m,'* x +',b)
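# Optional cross-check (a sketch; assumes numpy is installed): numpy.polyfit of
# degree 1 returns the same slope and intercept for this data.
# import numpy as np
# m_np, b_np = np.polyfit(points[0::2], points[1::2], 1)
# print('numpy check: y =', m_np, '* x +', b_np)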
| [
"[email protected]"
] | |
29a72ae6b56db6acd56877044674cc86d1224cd2 | 1990900a92276126e1c67cce69de33e0f8d4f270 | /Server/keydict.py | 77475531c1fdc80d0c6362d746d6d99754bb9c12 | [] | no_license | Jacksonblair/SIT210Project | 6db59e671a7f062fdd9d98ae93ad5087fe0fc7a8 | 17724340fd8f84d17904065f9292055eeeb36b3a | refs/heads/main | 2023-05-29T16:51:42.035915 | 2021-06-06T09:39:41 | 2021-06-06T09:39:41 | 374,320,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | # https://github.com/boppreh/keyboard/blob/master/keyboard/_canonical_names.py
# Maps browser key codes to key codes that the keyboard lib can parse
keydict = {
'KeyA': 'a',
'KeyB': 'b',
'KeyC': 'c',
'KeyD': 'd',
'KeyE': 'e',
'KeyF': 'f',
'KeyG': 'g',
'KeyH': 'h',
'KeyI': 'i',
'KeyJ': 'j',
'KeyK': 'k',
'KeyL': 'l',
'KeyM': 'm',
'KeyN': 'n',
'KeyO': 'o',
'KeyP': 'p',
'KeyQ': 'q',
'KeyR': 'r',
'KeyS': 's',
'KeyT': 't',
'KeyU': 'u',
'KeyV': 'v',
'KeyW': 'w',
'KeyX': 'x',
'KeyY': 'y',
'KeyZ': 'z',
'Digit0': '0',
'Digit1': '1',
'Digit2': '2',
'Digit3': '3',
'Digit4': '4',
'Digit5': '5',
'Digit6': '6',
'Digit7': '7',
'Digit8': '8',
'Digit9': '9',
'Numpad0': '0',
'Numpad1': '1',
'Numpad2': '2',
'Numpad3': '3',
'Numpad4': '4',
'Numpad5': '5',
'Numpad6': '6',
'Numpad7': '7',
'Numpad8': '8',
'Numpad9': '9',
'Tab': 'tab',
'CapsLock': 'caps lock',
'ShiftLeft': 'shift',
'ShiftRight': 'shift',
'ControlLeft': 'ctrl',
'ControlRight': 'ctrl',
'AltLeft': 'alt',
'AltRight': 'alt',
'ArrowLeft': 'left',
'ArrowRight': 'right',
'ArrowUp': 'up',
'ArrowDown': 'down',
'Space': 'space',
'Enter': 'enter',
'Backspace': 'backspace',
} | [
"[email protected]"
] | |
ccdc4e8142d0e2b9044e3b8bf578a2695a5c38d0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_leads.py | e86c49d643ca97b9c70c7f7692bf0545cdae9abb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#calss header
class _LEADS():
def __init__(self,):
self.name = "LEADS"
self.definitions = lead
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['lead']
| [
"[email protected]"
] | |
97ca7fce27026a6ad099f82c46b49c36c0dfe8a2 | 600eae9323c5703e5ae3f25ece1bc54fbf4e4883 | /dft_structures/01_bulk_structures/02_IrO3/100/01_my_attempt/01_generate_init_slabs/sc_cut_slab.py | df3bbbb63ddbbbb4e43b6d36e2820c2221959077 | [] | no_license | flash-jaehyun/PROJ_IrOx_Active_Learning_OER | 0d2b16a34b43a8d7572ac3e2141551c59a974c0a | b2780de4dd4b72d89c60ed767d6fe9de290c19a7 | refs/heads/master | 2023-07-14T07:18:59.103433 | 2020-10-13T01:07:54 | 2020-10-13T01:07:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | #!/usr/bin/env python
"""Cut slabs from bulk structures using ASE, pymatgen, and CatKit.
Author: Raul A. Flores
"""
# | - Import Modules
from ase import io
# from ase.visualize import view
from atoms_objects.slab_generation import (
cut_slab_ase,
cut_slab_pymatgen,
cut_slab_catkit,
)
from catkit.gen import utils
#__|
# | - Script Inputs
facet = (1, 0, 0)
#__|
# | - Read Bulk Structure
bulk = io.read("init.cif")
# bulk = utils.get_spglib_cell(bulk)
# bulk.write("bulk_standard.cif")
#__|
# | - ASE
slab_ase = cut_slab_ase(
bulk,
facet,
layers=6,
vacuum=8,
)
slab_ase.write("out_slab_ase.cif")
#__|
# | - pymatgen
slab_pymatgen = cut_slab_pymatgen(
bulk,
facet,
min_slab_size=18.,
min_vacuum_size=10.,
)
slab_pymatgen.write("out_slab_pymatgen.cif")
#__|
# | - CatKit
slab_catkit = cut_slab_catkit(
bulk,
facet,
slab_thickness=20,
vacuum=8.,
)
slab_catkit = slab_catkit[0]
slab_catkit.write("out_slab_catkit.cif")
#__|
| [
"[email protected]"
] | |
2fac8d4acc2d38ad64a9574af647432bf52f1dd8 | bf6b2139d2ea3103d671b14e475cb6819941e1dd | /PyBites/45/test_fizzbuzz.py | e3090aee65c4a73643403ff16bfc92c94b843ea9 | [] | no_license | cyberfork2000/pycharmprojects | 424b1a3c10cd346032f5093bab57e21ec7abd2c5 | a135f0819f81665f963108ed5405fb549d8f85ba | refs/heads/master | 2022-01-20T18:02:58.406660 | 2019-12-23T15:19:24 | 2019-12-23T15:19:24 | 168,381,488 | 1 | 0 | null | 2022-01-06T22:29:41 | 2019-01-30T17:06:07 | Python | UTF-8 | Python | false | false | 2,725 | py | import pytest
from fizzbuzz import fizzbuzz, display_fizzbuzz
def test_invalid_input_fizzbuzz():
"""Tests fizzbuzz handling of invalid input"""
with pytest.raises(ValueError):
fizzbuzz('FAIL')
with pytest.raises(ValueError):
fizzbuzz(3.5)
def test_invalid_input_display_fizzbuzz():
"""Tests display_fizzbuzz handling of invalid input"""
with pytest.raises(ValueError):
display_fizzbuzz('FAIL')
with pytest.raises(ValueError):
display_fizzbuzz(3.5)
def test_fizz():
"""Tests if fizzbuzz returns Fizz for multiples of 3"""
assert fizzbuzz(3) == 'Fizz'
assert fizzbuzz(9) == 'Fizz'
assert fizzbuzz(63) == 'Fizz'
def test_buzz():
"""Tests if fizzbuzz returns Buzz for multiples of 5"""
assert fizzbuzz(5) == 'Buzz'
assert fizzbuzz(10) == 'Buzz'
assert fizzbuzz(55) == 'Buzz'
def test_fizzbuzz():
"""Tests if fizzbuzz returns FizzBuzz for multiples of 3 and 5"""
assert fizzbuzz(15) == 'FizzBuzz'
assert fizzbuzz(30) == 'FizzBuzz'
assert fizzbuzz(90) == 'FizzBuzz'
def test_nofizznobuzz():
"""Tests if fizzbuzz returns a number for non-multiples of 3 and/or 5"""
assert fizzbuzz(1) == 1
assert fizzbuzz(7) == 7
assert fizzbuzz(92) == 92
def test_print_fizz(capsys):
"""Tests if display_fizzbuzz prints Fizz correctly to stdout"""
display_fizzbuzz(3)
out, err = capsys.readouterr()
assert out == "Fizz\n"
display_fizzbuzz(9)
out, err = capsys.readouterr()
assert out == "Fizz\n"
display_fizzbuzz(33)
out, err = capsys.readouterr()
assert out == "Fizz\n"
def test_print_buzz(capsys):
"""Tests if display_fizzbuzz prints Buzz correctly to stdout"""
display_fizzbuzz(5)
out, err = capsys.readouterr()
assert out == "Buzz\n"
display_fizzbuzz(10)
out, err = capsys.readouterr()
assert out == "Buzz\n"
display_fizzbuzz(55)
out, err = capsys.readouterr()
assert out == "Buzz\n"
def test_print_fizzbuzz(capsys):
"""Tests if display_fizzbuzz prints FizzBuzz correctly to stdout"""
display_fizzbuzz(15)
out, err = capsys.readouterr()
assert out == "FizzBuzz\n"
display_fizzbuzz(30)
out, err = capsys.readouterr()
assert out == "FizzBuzz\n"
display_fizzbuzz(60)
out, err = capsys.readouterr()
assert out == "FizzBuzz\n"
def test_print_nofizznobuzz(capsys):
"""Tests if display_fizzbuzz prints non-multiples correctly to stdout"""
display_fizzbuzz(4)
out, err = capsys.readouterr()
assert out == "4\n"
display_fizzbuzz(11)
out, err = capsys.readouterr()
assert out == "11\n"
display_fizzbuzz(98)
out, err = capsys.readouterr()
assert out == "98\n" | [
"[email protected]"
] | |
d2ce67ea67e64569a8602b04169bc229b349ad56 | 47c2ee5ddc4fff17bf26c1e74517f9a6b120bcd0 | /domainadapt/custom_callbacks/Loss_plotter.py | 024d0acb209e6973709857381be551e654383205 | [] | no_license | 2020miccai/submission-813 | ad5807f85a9ca4a9470515347d2fa76a7b93483a | 0145705f1ce5b4b552731153b1657d09c296ab16 | refs/heads/master | 2022-09-19T20:20:19.488191 | 2020-05-25T18:23:59 | 2020-05-25T18:25:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,047 | py | import matplotlib
matplotlib.use('Agg')
import pandas as pd
import os
from os.path import join
import matplotlib.pyplot as plt
class LossPlotter(object):
def __init__(self, mylog_path="./log", mylog_name="training.log", myloss_names=["loss"],
mymetric_names=["Dice_Accuracy", "Accuracy"], cmb_plot=1):
super(LossPlotter, self).__init__()
self.log_path = mylog_path
self.log_name = mylog_name
self.loss_names = list(myloss_names)
self.metric_names = list(mymetric_names)
self.cmb_plot = cmb_plot
if cmb_plot:
plt_path = join(self.log_path, "plot")
if not os.path.exists(plt_path):
os.makedirs(plt_path)
os.makedirs(join(plt_path, "train"))
os.makedirs(join(plt_path, "valid"))
os.makedirs(join(plt_path, "test_lb"))
os.makedirs(join(plt_path, "test_un"))
else:
if not os.path.exists(join(self.log_path, "plot")):
os.makedirs(join(self.log_path, "plot"))
def plotter(self):
dataframe = pd.read_csv(join(self.log_path, self.log_name), skipinitialspace=True)
if self.cmb_plot:
for i in range(len(self.loss_names)):
plt.figure(i)
plt.plot(dataframe[self.loss_names[i]], label="train_" + self.loss_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", "train", self.loss_names[i] + ".png"))
plt.close()
plt.figure(i)
plt.plot(dataframe["val_" + self.loss_names[i]], label="val_" + self.loss_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", "valid", self.loss_names[i] + ".png"))
plt.close()
plt.figure(i)
plt.plot(dataframe["test_lb_" + self.loss_names[i]], label="test_lb_" + self.loss_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", "test_lb", self.loss_names[i] + ".png"))
plt.close()
plt.figure(i)
plt.plot(dataframe["test_un_" + self.loss_names[i]], label="test_un_" + self.loss_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", "test_un", self.loss_names[i] + ".png"))
plt.close()
for i in range(len(self.metric_names)):
plt.figure(i + len(self.loss_names))
plt.plot(dataframe[self.metric_names[i]], label="train_" + self.metric_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", "train", self.metric_names[i] + ".png"))
plt.close()
plt.figure(i + len(self.loss_names))
plt.plot(dataframe["val_" + self.metric_names[i]], label="val_" + self.metric_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", "valid", self.metric_names[i] + ".png"))
plt.close()
plt.figure(i + len(self.loss_names))
plt.plot(dataframe["test_lb_" + self.metric_names[i]], label="test_lb_" + self.metric_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", "test_lb", self.metric_names[i] + ".png"))
plt.close()
plt.figure(i + len(self.loss_names))
plt.plot(dataframe["test_un_" + self.metric_names[i]], label="test_un_" + self.metric_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", "test_un", self.metric_names[i] + ".png"))
plt.close()
else:
for i in range(len(self.loss_names)):
plt.figure(i)
plt.plot(dataframe[self.loss_names[i]], label="train_" + self.loss_names[i])
plt.plot(dataframe["val_" + self.loss_names[i]], label="val_" + self.loss_names[i])
plt.plot(dataframe["test_lb_" + self.loss_names[i]], label="test_lb_" + self.loss_names[i])
plt.plot(dataframe["test_un_" + self.loss_names[i]], label="test_un_" + self.loss_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", self.loss_names[i] + ".png"))
plt.close()
for i in range(len(self.metric_names)):
plt.figure(i + len(self.loss_names))
plt.plot(dataframe[self.metric_names[i]], label="train_" + self.metric_names[i])
plt.plot(dataframe["val_" + self.metric_names[i]], label="val_" + self.metric_names[i])
plt.plot(dataframe["test_lb_" + self.metric_names[i]], label="test_lb_" + self.metric_names[i])
plt.plot(dataframe["test_un_" + self.metric_names[i]], label="test_un_" + self.metric_names[i])
plt.legend()
plt.savefig(join(self.log_path, "plot", self.metric_names[i] + ".png"))
plt.close()
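# Usage sketch (hypothetical paths and column names): plot every loss/metric
# column of a training.log that also holds the val_/test_lb_/test_un_ variants
# read by plotter(), writing PNG files under ./log/plot:
#
#   plotter = LossPlotter(mylog_path="./log", mylog_name="training.log",
#                         myloss_names=["loss"], mymetric_names=["Dice_Accuracy"])
#   plotter.plotter()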
| [
"[email protected]"
] | |
4b72779889492dc65a999e027e798b298e1a0640 | 8ab50987572b8dbef361e24cf35ac33ee58a1d6c | /xilva_core/src/state_machine.py | c82de3e2db47267f29947d2355deb886912b57d6 | [] | no_license | ustyui/xilva | d6deef698a1a5438b6808c5a68897aa46b5f2290 | 445899270803bd1026e43d773f828443c3f8d609 | refs/heads/master | 2020-09-16T20:01:21.728336 | 2020-04-07T06:07:22 | 2020-04-07T06:07:22 | 223,875,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,958 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 3 17:31:38 2019
State Machine of xilva
@author: ibukidev
"""
# read csv
import rospy,rospkg
from xilva_core.msg import Evans
import threading, sys, time
import numpy as np
from sensor_msgs.msg import Joy
import modules.utils as utils
import modules.topics as topics
from modules.protocols import read_dfcsv
### environment variables ###
_RATE = 50 # ros rate
_driveunits = 50
#_KDC = 127.32395447
_KDC = 57
_RES = 0.01999
_filename = sys.argv[1]
#_filename = 'lookaround'
# open the .csv
rospack = rospkg.RosPack()
csvpath = rospack.get_path('xilva_core')+'/data/csv/'+_filename+'.csv'
_MASK = [1]*15
df = read_dfcsv(csvpath)
class csvslave():
def __init__(self, _df):
# csv
        self._df = _df
self._timelist = []
self._motionlist = []
self._lastmotion = [0.0]*50
self._payload = [0.0]*50
self._payload_float = [0.0]*50
self._margin_to_target = [0.0]*50
self._time_interval = 0.0
# messages
self._pub_msg = Evans()
# publishers
self.pub = rospy.Publisher(topics.slave[4], Evans, queue_size = 10)
# subscribers
### calculations ###
def rad2cmd(self):
# multiply mask first
for i in range(0,len(_MASK)):
self._df[self._df.columns[i+1]] = _MASK[i] * self._df[self._df.columns[i+1]]
for i in range(0, len(self._df)):
# time append
if (i == 0):
self._timelist.append(float(self._df.ix[i,0])) # init
else:
self._timelist.append(float(self._df.ix[i,0])-float(self._df.ix[i-1,0])) # interval
# listize panda frame
templist = list(self._df.ix[i,1:])
self._motionlist.append(templist)
def make_msg_and_pub(self, msgid, seq, payload, publisher):
# make message
self._pub_msg.header.stamp = rospy.Time.now()
self._pub_msg.level = seq
self._pub_msg.name = 'hsmap'
self._pub_msg.msgid = msgid
self._pub_msg.payload = payload
# publish message
publisher.publish(self._pub_msg)
def joint_to_where(self):
self._lastmotion = self._payload # current motion
for i in range(len(self._motionlist)):
if (i==0):
pass
else:
# save lastmotion
self._margin_to_target = self._lastmotion # image the motion
self._margin_to_target = np.array(self._motionlist[i])-np.array(self._lastmotion) # compare margin
lines_nb = int(self._timelist[i]/_RES) #times of given res frequency
step_to_target = _KDC * self._margin_to_target/lines_nb # for each frequency, linear subposition
# add process
for lines in range(0, lines_nb):
self._payload_float = self._payload_float + step_to_target
self._payload = list(np.array(self._payload_float, dtype = 'int32'))
self.make_msg_and_pub(3,2, self._payload, self.pub)
#print(self._payload)
time.sleep(_RES)
#print(self._payload_float)
self._lastmotion = self._motionlist[i]
return None
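    # Illustrative numeric example (values are hypothetical, not taken from a CSV): with
    # _RES = 0.01999 s, a key frame 1.0 s after the previous one gives lines_nb ~= 50, so each
    # joint is stepped by _KDC * margin / 50 every _RES seconds -- a linear ramp from the last
    # pose to the next key frame, publishing one Evans message per step.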
def start(self):
rospy.loginfo("HUMANOID SIMULATION MODEL MAPPING")
loop_rate = rospy.Rate(_RATE)
self.rad2cmd()
self.joint_to_where()
#rospy.spin()
## generate command by comparing it with maximum minimum
# send message
if __name__ == "__main__":
# pose class
hsm = csvslave(df)
nh = rospy.init_node("StateMachine")
hsm.start()
# init nodes
| [
"[email protected]"
] | |
4e15f6da34b8ba4cb280f775a5b62ae86d165a97 | a8d25ee748c22b10697ab209feea537a8ad74e5e | /icj/wsgi.py | 51e08989fe1f9bc7f78ba7b2c50dd8a32a4abc0e | [
"MIT"
] | permissive | akkozha/interac | ae878635f7d30262feac1e9e081d34d9ae7f119e | 0d71d4ce302fd02554811a7a587209bc63619e30 | refs/heads/master | 2022-09-12T22:41:36.592878 | 2020-05-31T19:09:07 | 2020-05-31T19:09:07 | 255,415,102 | 0 | 0 | null | 2020-04-19T19:43:40 | 2020-04-13T18:51:27 | HTML | UTF-8 | Python | false | false | 383 | py | """
WSGI config for icj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "icj.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
6e21c9da97e93c15e681d95f120e8061f007e2ed | 99dffdf8bce0b2394add23cbbbc744e800da795f | /day5/SHOPPING/core/shopping.py | 1897068bcf0c4317b7c98c921dfca6bd16522a43 | [] | no_license | woruochengfeng/learning_python | 12519462e14d71a832229c6e89d049e3d3450d8b | 974a6e60fb8fcec716a77d61dce821ad1a7ce652 | refs/heads/master | 2021-01-21T12:53:34.867149 | 2016-07-12T04:09:24 | 2016-07-12T04:09:24 | 58,536,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,451 | py | #!/usr/bin/env python3
# Author: Zhangxunan
import sys
import os
import time
from prettytable import PrettyTable
from SHOPPING.core import shop_db
from SHOPPING.core import public
from ATM.api import common_api
USER_INFO = {'is_login': False}
SHOPPING_CART = []
def goods_list():
"""
    Product catalogue
    :return: dict mapping a category name to a list of (product name, price) tuples
"""
shoppingmall = {
'汽车类':
[('BMW X3', '33333'),
('Audi Q5', '33355'),
('Pasate', '55566'),
('Tesla Model S', '8888888')],
'家电类':
[('小米电视3S', '8999'),
('乐视超级电视X65', '5998'),
('格力空调', '3000'),
('海尔洗衣机', '4000')],
'衣服类':
[('HM', '299'),
('UNIQLO', '199'),
('JackJones', '300'),
('Adidas', '700')],
'手机类':
[('小米手机5', '1999'),
('iPhone6s Plus', '6888'),
('华为P9 Plus', '4388'),
('OPPO R9 Plus', '3299')]
}
return shoppingmall
def register():
"""
    Register a new user account
:return:None
"""
user_dict = shop_db.get_user_info()
username = input('请输入注册的用户名:')
password = input('请输入密码:')
confirm_password = input('请再次输入密码:')
if username in user_dict:
print('此用户已注册,请换个用户名再试!')
return
else:
if username == '' or password == '':
print('用户名或密码不能为空')
return
if password == confirm_password:
user_dict[username] = [password, '']
shop_db.update_user_info(user_dict)
public.write_log(username, '注册成功')
print('注册成功,您现在可以登录了!')
else:
print('两次输入的密码不一致!')
def login():
"""
    Log in
:return:None
"""
user_dict = shop_db.get_user_info()
username = input('用户名:')
password = input('密码:')
if username in user_dict and password == user_dict[username][0]:
USER_INFO['username'] = username
USER_INFO['card_id'] = user_dict[username][1]
USER_INFO['is_login'] = True
public.write_log(username, '登录成功')
print('登录成功!')
else:
if username == '' or password == '':
print('用户名或密码不能为空!')
else:
public.write_log(username, '尝试登录失败,用户名或密码错误')
print('用户名或密码错误!')
def shopping(goods, numbers):
"""
    Add the selected product to the shopping cart
    :param goods: selected product as a (name, price) tuple
    :param numbers: quantity to buy
:return:None
"""
flag = False
if SHOPPING_CART:
for item in SHOPPING_CART:
if goods[0] == item[0]:
item[2] += numbers
item[3] += int(goods[1]) * numbers
public.write_log(USER_INFO['username'], '两次购买 %s,自动将订单合并' % goods[0])
flag = True
break
if flag:
return
shop_cart = [goods[0],
goods[1],
numbers,
int(goods[1]) * numbers]
SHOPPING_CART.append(shop_cart)
public.write_log(USER_INFO['username'], '将%d个%s加入购物车' % (shop_cart[2], shop_cart[0]))
return
def write_shopping_history():
"""
    Write the purchase history: save the shopping records to file
:return:None
"""
current_time = time.strftime('%Y-%m-%d %H:%M:%S')
shopping_records_dict = shop_db.get_shopping_records()
if USER_INFO['username'] in shopping_records_dict:
shopping_records_dict[USER_INFO['username']][current_time] = SHOPPING_CART
else:
shopping_records_dict[USER_INFO['username']] = {current_time: SHOPPING_CART}
shop_db.update_shopping_records(shopping_records_dict)
public.write_log(USER_INFO['username'], '购物记录写到文件里')
def read_shopping_history():
"""
    Read the purchase history back from file and print it
:return:None
"""
shopping_records_dict = shop_db.get_shopping_records()
if USER_INFO['username'] in shopping_records_dict:
list_key = list(shopping_records_dict[USER_INFO['username']].keys())
list_key.sort()
x = PrettyTable(["商品名称", "单价", "数量", "小计"])
x.align["商品名称"] = "l"
x.padding_width = 1
for item in list_key:
print('时间:', item)
for shop_records in shopping_records_dict[USER_INFO['username']][item]:
x.add_row(shop_records)
print(x)
x.clear_rows()
public.write_log(USER_INFO['username'], '查看购物记录!')
else:
print('您还没有购物记录!')
def credit_bind():
"""
    Bind a bank (credit) card to the account
:return:
"""
user_dict = shop_db.get_user_info()
if not USER_INFO['card_id']:
card_id = input('请输入要绑定的信用卡号:')
password = input('请输入信用卡密码:')
result = common_api.credit_bind(card_id, password)
if result:
user_dict[USER_INFO['username']][1] = card_id
USER_INFO['card_id'] = card_id
shop_db.update_user_info(user_dict)
public.write_log(USER_INFO['username'], '绑定信用卡成功,卡号为:%s' % card_id)
print('绑定信用卡%s成功' % card_id)
else:
public.write_log(USER_INFO['username'], '绑定信用卡失败,卡号或密码错误!')
print('绑定信用卡失败,卡号或密码错误!')
else:
print('您已经绑定信用卡,卡号为:%s' % USER_INFO['card_id'])
def checkout(total_amount):
"""
    Check out
    :param total_amount: total amount to pay
    :return: True if payment succeeds, False if it fails, None otherwise
"""
print('您绑定的信用卡号是:%s' % USER_INFO['card_id'])
password = input('请输入此信用卡号的密码:')
result = common_api.checkout(USER_INFO['card_id'], password, total_amount)
if result['code'] == 0:
return True
elif result['code'] == 1 or result['code'] == 2:
return False
else:
return
def shopping_cart():
"""
    Shopping cart: if the user does not check out, the cart contents are saved to
    file and read back the next time the user logs in.
    :return: None when payment fails
"""
total_amount = 0
if SHOPPING_CART:
x = PrettyTable(["商品名称", "单价", "数量", "小计"])
x.align["商品名称"] = "l"
x.padding_width = 1
for item in SHOPPING_CART:
total_amount += item[3]
x.add_row(item)
print(x)
print('总计: %d 元' % total_amount)
confirm_checkout = input('是否要结账?(默认为结账 Y/y N/n):')
if confirm_checkout == 'Y' or confirm_checkout == 'y' or confirm_checkout == '':
if not USER_INFO['card_id']:
print('您还没绑定信用卡,请先张绑定之后再付款!')
return
result = checkout(total_amount)
if result:
public.write_log(USER_INFO['username'], '结账成功,总共消费%d元' % total_amount)
print('成功付款,总共消费%d元,欢迎下次光临!' % total_amount)
else:
public.write_log(USER_INFO['username'], '付款失败,密码错误或余额不足')
print('付款失败,密码错误或余额不足')
return
write_shopping_history()
SHOPPING_CART.clear()
shop_db.update_shopping_cart(USER_INFO['username'], SHOPPING_CART)
public.write_log(USER_INFO['username'], '清空购物车')
elif confirm_checkout == 'N' or confirm_checkout == 'n':
public.write_log(USER_INFO['username'], '取消结账')
print('取消结账!')
else:
print('无效的输入!')
else:
print('购物车为空!')
def goods_menu():
"""
    Display the shopping menu
:return:
"""
goods = goods_list()
goods_class = list(goods.keys())
goods_class.sort()
while True:
for i, v in enumerate(goods_class, 1):
print(i, v)
class_num = input('请选择品类(b/back 返回上一级):')
if class_num.isdigit():
class_num = int(class_num)
if class_num < 0 or class_num > 4:
print('请输入有效的编号!')
continue
else:
while True:
for i, v in enumerate(goods[goods_class[class_num - 1]], 1):
print(i, ' '.join(v))
goods_num = input('请选择(b/back 返回上一级):')
if goods_num.isdigit():
goods_num = int(goods_num)
if goods_num < 0 or goods_num > 4:
print('请输入有效的编号:')
continue
else:
numbers = input('请输入购买的商品数量:')
if numbers.isdigit():
numbers = int(numbers)
shopping(goods[goods_class[class_num - 1]][goods_num - 1], numbers)
shop_db.update_shopping_cart(USER_INFO['username'], SHOPPING_CART)
elif goods_num == 'b' or goods_num == 'back':
break
elif goods_num == 'q' or goods_num == 'quit':
sys.exit(0)
else:
print('必须输入编号!')
elif class_num == 'b' or class_num == 'back':
break
elif class_num == 'q' or class_num == 'quit':
sys.exit(0)
else:
print('必须输入编号!')
def main_menu():
"""
    Menu shown after login; the main features are shopping, purchase history and the shopping cart (which includes checkout).
:return: None
"""
login()
SHOPPING_CART.extend(shop_db.get_shopping_cart(USER_INFO['username']))
public.write_log(USER_INFO['username'], '读取购物车信息')
menu_main = """
1.购-物 2.购物记录
3.购物车 4.绑定银行卡
5.返-回 0.退-出
"""
while True:
print(menu_main)
menu_num = input('请选择:')
if menu_num.isdigit():
menu_num = int(menu_num)
if menu_num == 1:
goods_menu()
elif menu_num == 2:
read_shopping_history()
elif menu_num == 3:
shopping_cart()
elif menu_num == 4:
credit_bind()
elif menu_num == 5:
break
elif menu_num == 0:
sys.exit(0)
else:
print('请输入有效的编号!')
else:
if menu_num == '':
continue
else:
print('只能输入编号!')
def start_menu():
"""
    Start menu
:return:None
"""
menu_start = """
1.登录 2.注册
0.退出
"""
print(menu_start)
def main():
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
log_dir = os.path.join(base_dir, 'logs')
public.set_logging(log_dir, 'debug')
while True:
start_menu()
num = input('请选择:')
if num.isdigit():
num = int(num)
if num == 1:
main_menu()
elif num == 2:
register()
elif num == 0:
sys.exit(0)
else:
print('请选择有效的序号!')
else:
if num == '':
pass
else:
print('无效的输入!')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d0df0d7c9c87f063d625ecfe9a1cb3e0c7b3c77e | 461f3883272ce81d9f8c7616202f67c8423e9f4a | /shifumi.py | 7015a14eb93181f3cf18ad10b0cbbbfb75662308 | [] | no_license | RubenPain/Shifumi-Python | cbe88b69f92556072b59fd862e05d12918d48888 | 4bd25d0a4af70b81ff708d412604175ee4ed1ef5 | refs/heads/main | 2023-02-28T21:08:52.939748 | 2021-01-29T10:26:31 | 2021-01-29T10:26:31 | 334,109,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | import random
# Liste des possibilités pour que l'ordi puisse faire un choix aléatoire
shifumi = ['p','c','f']
# On demande au joueur le nombre de fois qu'il veut jouer avec un input qui sera un nombre donc que l'on convertit en int
nb_manche_voulu = int(input("Entrez le nombre de manche que vous souhaitez ?"))
# Initialisations des variables
score_j = 0
score_ord = 0
nb_manche = 0
# La condition tant que pour jouer le nombre de manches souhaité
while nb_manche < nb_manche_voulu:
    # Ask for the player's choice and pick the computer's choice at random
choix_joueur = input("Entrez votre choix Pierre(p), Ciseaux(c) et Feuille(f) ?")
choix_ordi = random.choice(shifumi)
    # All the cases where the player wins
if (choix_joueur == 'p' and choix_ordi == 'c') or (choix_joueur == 'c' and choix_ordi == 'f') or (choix_joueur == 'f' and choix_ordi == 'p'):
print("Joueur gagne !")
score_j+=1
    # All the cases that are a draw
elif (choix_joueur == 'p' and choix_ordi == 'p') or (choix_joueur == 'c' and choix_ordi == 'c') or (choix_joueur == 'f' and choix_ordi == 'f'):
print("Egalité !")
    # All the cases where the player loses
elif (choix_joueur == 'c' and choix_ordi == 'p') or (choix_joueur == 'f' and choix_ordi == 'c') or (choix_joueur == 'p' and choix_ordi == 'f'):
print("Ordi gagne !")
score_ord+=1
    # Increment the round counter after playing a round
nb_manche+=1
# Compare the scores to determine the match result
if score_j>score_ord:
print("Joueur gagne le match ! J:"+str(score_j)+" vs O:"+str(score_ord))
elif score_ord == score_j:
print("Egalité dans ce match ! J:"+str(score_j)+" vs O:"+str(score_ord))
else:
print("Ordi gagne le match ! J:"+str(score_j)+" vs O:"+str(score_ord))
| [
"[email protected]"
] | |
6016cf6e6ee8058cced8b5f78c72ddd592b26710 | cf0ab8503d4d704045070deea1e2125375711e86 | /apps/users/backup.py | 5c37a669ed406380966df39f592acca9dfab8e3b | [] | no_license | faierbol/syncano-platform | c3c6468600115752fd9fa5e46a0ad59f75f6bc9c | 879111874d1ef70418b4890cf970720b0a2be4d8 | refs/heads/master | 2023-07-20T10:13:40.066127 | 2021-02-08T15:01:13 | 2021-02-08T15:01:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from apps.backups import site
from .models import Group, Membership, User, UserSocialProfile
site.register([Group, User, Membership, UserSocialProfile])
| [
"[email protected]"
] | |
597899983290a2e260588684b07817ad1d83a958 | 05b06c2586c46d24d8bec1fe9d322040c10589a8 | /Practice/0527(1_김형림)_2.py | 9a52e7c0191ef485b3b403b3d8439867d6f1849a | [] | no_license | gentle-potato/Python | 392955bdafd27a426950a4af1c3746a9b899d106 | e96b754037d39f75d6b6d50f2b82202433d975d5 | refs/heads/master | 2023-06-02T06:30:48.605472 | 2021-06-12T02:29:29 | 2021-06-12T02:29:29 | 376,185,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | print('====== 2. 두 개의 숫자 더하기 ======')
def sum(x, y) :
    # Load the file that stores the numbers and split it on whitespace
    # Also open the file the results will be written to
f = open(x, 'r', encoding='utf-8')
g = open(y, 'a', encoding='utf-8')
data = f.read()
data = data.split()
data = list(map(int, data))
# print(data)
    # Build a list of the pairwise sums, then write them out one per line
results = []
for i in range(len(data)) :
if i % 2 == 0 :
results.append(data[i] + data[i+1])
else :
results.append(0)
for i in range(len(results)) :
if i % 2 == 0 :
if data[i+1] < 0 :
g.write(f'{data[i]}-{abs(data[i+1])}={results[i]:.1f}\n')
else :
g.write(f'{data[i]}+{data[i+1]}={results[i]:.1f}\n')
f.close()
g.close()
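# Illustrative example (hypothetical input): if sum.txt contains "3 4 -5 2",
# the lines "3+4=7.0" and "-5+2=-3.0" are appended to save.txt.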
if __name__ == '__main__' :
sum('sum.txt', 'save.txt') | [
"[email protected]"
] | |
01206fb8a2748f3bb5b6dec804c5b46f81cf2c3a | dfc78e8c5502d08b1d24bfe439f02580ceeec272 | /dataProcess/case_study.py | fba80b51632b4ee30ec33865f2617acc076c2ded | [] | no_license | Observer007/trajectory-prediction | 135a118c06b7aed45c3df5ad65de3a855cf94a6d | b6a7197c9d37ea98a267e27a8034055731c47444 | refs/heads/master | 2020-04-28T08:41:06.135778 | 2019-03-12T04:56:30 | 2019-03-12T04:56:30 | 175,137,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | import numpy as np
import math
id = 'LnxMB9evozs6BouF_irH0cowsiNv1gir0_0'
size, grids = 'chengdu', '50'
grid_output_file = "../data/test/test-"+size+"-grid"+grids+"-10.txt"
road_file = '../data/roadnet/road-'+size+'-'+grids+'.txt'
def DistanceBetweenMeter(geo1, geo2):
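    # Great-circle distance in metres between two (lon, lat) points given in degrees,
    # using the spherical law of cosines with an Earth radius of 6378137 m.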
R = 6378137
lonA, latA = geo1[0]/180*math.pi, geo1[1]/180*math.pi
lonB, latB = geo2[0]/180*math.pi, geo2[1]/180*math.pi
return R*math.acos(min(1.0, math.sin(math.pi/2-latA)*math.sin(math.pi/2-latB)*
math.cos(lonA-lonB) + math.cos(math.pi/2-latA)*math.cos(math.pi/2-latB)))
def read_road_info(road_file):
grid_num = 0
with open(road_file, 'r') as file:
for line in file:
if line.strip().__len__()>3:
grid_num+=1
grid2cor = []
intra_feas = []
with open(road_file, 'r') as file:
for line in file:
line = line.strip().split('\t')
if len(line) < 2:
continue
grid2cor.append([float(line[1]), float(line[2])])
intra_feas.append([line[i] for i in range(3, line.__len__())])
# print(DistanceBetweenMeter(grid2cor[4510], grid2cor[3082]))
assert grid2cor.__len__() == grid_num
return grid2cor, intra_feas
grid2cor, intra_feas = read_road_info(road_file)
with open(grid_output_file, 'r') as file:
for line in file:
line = line.strip().split(',')
grid_line = line[1:]
dis_matrix = np.zeros([11,11])
if id == line[0]:
for i in range(grid_line.__len__()-1):
for j in range(i+1, grid_line.__len__()):
dis_matrix[i][j] = \
DistanceBetweenMeter(grid2cor[int(grid_line[i].split(':')[0])],
grid2cor[int(grid_line[j].split(':')[0])])
print(dis_matrix) | [
"[email protected]"
] | |
83d1da1c1daae2797dc13c1eab88ea238f074e7c | bdb3f2c3b1181dc62792f34679b26197e01c90cf | /loanchain/lib/python2.7/site-packages/eth_keys/backends/base.py | 2f513e1dd910d999460ebb37f243af0198060b04 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | adithyabsk/loanchain | 8a590a5a2a7fc7707b6c4576baf029fadb99cf04 | 03cc809c5a804af6735acf8e7cd2023930bfc303 | refs/heads/master | 2023-05-08T08:05:27.956496 | 2019-06-23T17:19:26 | 2019-06-23T17:19:26 | 109,518,819 | 5 | 5 | null | 2021-05-24T04:36:13 | 2017-11-04T18:15:25 | Python | UTF-8 | Python | false | false | 1,000 | py | from eth_keys.datatypes import (
PrivateKey,
PublicKey,
Signature,
)
class BaseECCBackend(object):
def __init__(self):
self.PublicKey = type(
'{0}PublicKey'.format(type(self).__name__),
(PublicKey,),
{'_backend': self},
)
self.PrivateKey = type(
'{0}PrivateKey'.format(type(self).__name__),
(PrivateKey,),
{'_backend': self},
)
self.Signature = type(
'{0}Signature'.format(type(self).__name__),
(Signature,),
{'_backend': self},
)
def ecdsa_sign(self, msg_hash, private_key):
raise NotImplementedError()
def ecdsa_verify(self, msg_hash, signature, public_key):
return self.ecdsa_recover(msg_hash, signature) == public_key
def ecdsa_recover(self, msg_hash, signature):
raise NotImplementedError()
def private_key_to_public_key(self, private_key):
raise NotImplementedError()
| [
"[email protected]"
] | |
b0be5808ed645488d7987fcbe54f168491eb9e09 | 2b52e32f8ba65202078bde0173eb8e972434d3f8 | /Python_Algorithm/Baek/2748.py | 6e74594da15b2bdff8ba38f683a8aaa31d276af4 | [] | no_license | HoeYeon/Algorithm | 7167c463922227c0bc82e43940f7290fc1fa16af | 0e5ce2a3347d733bbaa894391cbf344fcb5161d6 | refs/heads/master | 2020-09-08T17:27:56.654485 | 2020-08-02T08:23:46 | 2020-08-02T08:23:46 | 221,195,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | arr = [-1 for i in range(91)]
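# Fill arr with Fibonacci numbers F(0)..F(90) iteratively, then print F(n) for the input n (Baekjoon 2748).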
arr[0] = 0
arr[1] = 1
for i in range(2,91):
arr[i] = arr[i-1] + arr[i-2]
print(arr[int(input())])
| [
"[email protected]"
] | |
5735640f846f4edf3f2937684a92c2ccec6aa52f | 6c1ae1e57f58e79bd4c139f47d0b535bc5b3edb2 | /imdb/imdb/items.py | 684b935c283747987031f5e9130484666fe1a2f2 | [] | no_license | stephensh24/scrapy_proj | 5108a71718e9c7c6ae71b9d4c7fecb084cf60669 | 646d05a4d6dd889e3813f9f48d7bc26442072cf6 | refs/heads/master | 2021-06-20T22:19:16.424469 | 2021-01-08T22:23:48 | 2021-01-08T22:23:48 | 133,443,288 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ImdbItem(scrapy.Item):
title = scrapy.Field()
# imdb_rating = scrapy.Field()
# meta_rating = scrapy.Field()
# genre = scrapy.Field()
release_date = scrapy.Field()
MPAA_rating = scrapy.Field()
# run_time = scrapy.Field()
director = scrapy.Field()
actors = scrapy.Field()
male_teen_rating = scrapy.Field()
male_youngAdult_rating = scrapy.Field()
male_adult_rating = scrapy.Field()
male_elder_rating = scrapy.Field()
male_ratingCount = scrapy.Field()
# male_teen_ratingCount = scrapy.Field()
# male_youngAdult_ratingCount = scrapy.Field()
# male_adult_ratingCount = scrapy.Field()
# male_elder_ratingCount = scrapy.Field()
female_teen_rating = scrapy.Field()
female_youngAdult_rating = scrapy.Field()
female_adult_rating = scrapy.Field()
female_elder_rating = scrapy.Field()
female_ratingCount = scrapy.Field()
# female_teen_ratingCount = scrapy.Field()
# female_youngAdult_ratingCount = scrapy.Field()
# female_adult_ratingCount = scrapy.Field()
# female_elder_ratingCount = scrapy.Field()
non_USusers = scrapy.Field()
# non_UScount = scrapy.Field()
us_users = scrapy.Field()
# us_count = scrapy.Field()
| [
"[email protected]"
] | |
57d551573f3a4aea5e19af6a23bce27eb609c8df | a3e8c6d1cf599587092ecc474dbdac9024dbca75 | /zabbbix4i.py | 0a1dcdb5c52bf2b65d66cfca3b7d8aad01df74eb | [] | no_license | DarkkFox/work | 7532a6f46995f54176abeedc6ad3f3a0a94c7105 | e5d29cf0bb87a5bfd3765a35ea82ef78223ee60c | refs/heads/main | 2023-03-08T11:18:14.446097 | 2021-02-24T12:17:59 | 2021-02-24T12:17:59 | 341,881,890 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.operators.ssh_operator import SSHOperator
from airflow.contrib.hooks.ssh_hook import SSHHook
from datetime import datetime, timedelta
import pprint
default_args = {
'owner': 'airflow',
'depends_on_past': True,
'start_date': datetime(2019, 6, 25),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
dag = DAG('zabbix4', default_args=default_args, schedule_interval=timedelta(minutes=1))
conn = PostgresHook(postgres_conn_id='agg2_db_pg')
def query_func(ds, query, **kwargs):
print(ds)
print(query)
result = ''
for rec in conn.get_records(query):
result = ''.join(map(str,(rec)))
return result
sshHook = SSHHook(ssh_conn_id='agg2')
query1 = \
"""
SELECT write_lag();
"""
query2 = \
"""
SELECT replay_lag();
"""
query3 = \
"""
SELECT flush_lag();
"""
query4 = \
"""
SELECT repl_state();
"""
ds = '{{ ds }}'
command1 = "echo '{0}' > /usr/share/zabgres/write_lag".format(query_func(ds, query1))
command2 = "echo '{0}' > /usr/share/zabgres/replay_lag".format(query_func(ds, query2))
command3 = "echo '{0}' > /usr/share/zabgres/flush_lag".format(query_func(ds, query3))
command4 = "echo '{0}' > /usr/share/zabgres/repl_state".format(query_func(ds, query4))
t1 = SSHOperator(
task_id="sshtask1",
command=command1,
ssh_hook=sshHook,
remote_host="10.127.33.41",
dag=dag
)
t2 = SSHOperator(
task_id="sshtask2",
command=command2,
ssh_hook=sshHook,
remote_host="10.127.33.41",
dag=dag
)
t3 = SSHOperator(
task_id="sshtask3",
command=command3,
ssh_hook=sshHook,
remote_host="10.127.33.41",
dag=dag
)
t4 = SSHOperator(
task_id="sshtask4",
command=command4,
ssh_hook=sshHook,
remote_host="10.127.33.41",
dag=dag
)
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
| [
"[email protected]"
] | |
e2a006c5fd5dda139a4ffa565d5595d9b86724ef | d469cced20011312c23b97339c6cb67ae136508a | /te/TE_WORK.py | 953d6edca3806d8e8153e867a969d4d58196c26c | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | priyadarshitathagat/te-ns | 0d52ee3c704af1d188269c8f2813e4f358849460 | 8ef414a0eafe59b6ec4521ec3c31df8d964acc98 | refs/heads/main | 2023-04-20T14:20:41.349610 | 2021-05-09T04:36:17 | 2021-05-09T04:36:17 | 366,020,627 | 0 | 0 | NOASSERTION | 2021-05-10T11:34:01 | 2021-05-10T11:34:00 | null | UTF-8 | Python | false | false | 29,827 | py | #**********************************************************************************************
# Traffic Emulator for Network Services
# Copyright 2020 VMware, Inc
# The BSD-2 license (the "License") set forth below applies to all parts of
# the Traffic Emulator for Network Services project. You may not use this file
# except in compliance with the License.
#
# BSD-2 License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE
#**********************************************************************************************
import sys, os
from TE_UTILS import convert, Logger, SysVQ
from sysv_ipc import ftok
import traceback
from collections import defaultdict
import paramiko
from scp import SCPClient
LOG_PATH = '/tmp/'
LOG_PATH_TE_DP = '/tmp/ramcache/'
# Check for the correctness of the LOG_PATH_TE_DP!
if(not(os.path.exists(LOG_PATH_TE_DP))):
print({'status' : False,
'statusmessage' : 'LOG_PATH_TE_DP does not exist'})
try:
lgr = Logger(' [ TE_WORKER ] ', os.path.join(LOG_PATH,'wrk.te.log')).getLogger()
lgr.info("Starting the TE WORK Process")
except:
with open('error.txt','a') as h:
h.write("Unable to get a Logger Object %s" %traceback.format_exc())
try:
import glob
import json, time, re, subprocess
import ast, string
import signal
from collections import OrderedDict
from rq.decorators import job as tejob
from rq import get_current_job
except Exception as e:
lgr.error("Import Failed.... %s" %traceback.format_exc() )
sys.exit(1)
# Needed for Root in Celery
os.environ.setdefault('C_FORCE_ROOT', 'true')
def __exec_cmd(cmd, stderr=True):
if stderr:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
else:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
out, err = proc.communicate()
# Both Python 2 & 3 compliant
if bool(out):
out = out.decode()
if bool(err):
err = err.decode()
return (out, err)
@tejob('te_broadcast_q')
def run_mgmt_command_te_dp(cmd=None):
try:
if cmd is None:
return {"status" : False, "statusmessage":"Command cannot be none"}
(out, err) = __exec_cmd(cmd)
lgr.info("Cmd=%s, Output=%s and err=%s" %(cmd, str(out), str(err)))
return {"status" : True, "statusmessage" : {"err" : err, "out": out}}
except:
return {"status" : False, "statusmessage" : "Exception Occurred: %s" %traceback.format_exc()}
@tejob('te_broadcast_q')
def tech_support(my_ip, remote_ip, remote_user, remote_pwd, remote_path, type_of_logs):
try:
folders_to_make = [os.path.join("/te_host/", 'te_%s_logs' %my_ip)]
files_to_send = []
tar_file = "te_{}_logs.tar.gz".format(my_ip)
tar_file_with_path = os.path.join("/te_host/", tar_file)
if(type_of_logs == "all" or type_of_logs == "setup"):
destination = os.path.join(folders_to_make[0], 'setup_logs/')
make_folder = False
#rq.log
file_interested = '/tmp/rq.log'
if(os.path.exists(file_interested)):
files_to_send.append((file_interested, destination))
make_folder = True
#download_docker.log
file_interested = '/te_root/download_docker.log'
if(os.path.exists(file_interested)):
files_to_send.append((file_interested, destination))
make_folder = True
if(make_folder):
folders_to_make.append(destination)
if(type_of_logs == "all" or type_of_logs == "process"):
destination = os.path.join(folders_to_make[0], 'process_logs/')
file_interested = '/tmp/ramcache/te_*.csv'
if(bool(glob.glob(file_interested))):
files_to_send.append((file_interested, destination))
folders_to_make.append(destination)
file_interested = '/tmp/*.log'
if(bool(glob.glob(file_interested))):
files_to_send.append((file_interested, destination))
folders_to_make.append(destination)
if(type_of_logs == "all" or type_of_logs == "core"):
destination = os.path.join(folders_to_make[0], 'core_logs/')
file_interested = '/opt/te/core.*'
if(bool(glob.glob(file_interested))):
files_to_send.append((file_interested, destination))
folders_to_make.append(destination)
if(type_of_logs == "all" or type_of_logs == "process" or type_of_logs=='core'):
file_interested_bin='/opt/te/bin/'
file_interested_src='/opt/te/src/'
            file_interested_makefile='/opt/te/Makefile'
destination=os.path.join(folders_to_make[0],'bin_src_file_dir/')
files_to_send.append((file_interested_bin, destination))
files_to_send.append((file_interested_src, destination))
files_to_send.append((file_interested_makefile, destination))
folders_to_make.append(destination)
if(bool(folders_to_make)):
str_folder_to_make = " ".join(folders_to_make)
cmd = "rm -rf %s; mkdir -p %s" %(folders_to_make[0], str_folder_to_make)
(out, err) = __exec_cmd(cmd)
lgr.info("Executing cmd=%s, out=%s, err=%s" %(cmd, out, err))
for (src, dest) in files_to_send:
cmd = "cp -r %s %s" %(src, dest)
(out, err) = __exec_cmd(cmd)
lgr.info("Executing cmd=%s, out=%s, err=%s" %(cmd, out, err))
cmd = "tar -zcvf {} {}/*; rm -rf {}".format(tar_file_with_path, folders_to_make[0], folders_to_make[0])
(out, err) = __exec_cmd(cmd)
lgr.info("Executing cmd=%s, out=%s, err=%s" %(cmd, out, err))
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if remote_pwd:
ssh.connect(remote_ip, username=remote_user, password=remote_pwd)
else:
ssh.connect(remote_ip, username=remote_user)
scp = SCPClient(ssh.get_transport())
scp.put(tar_file_with_path, remote_path)
except:
lgr.error(traceback.format_exc())
return {"status" : False,
"statusmessage" : "Unable to SCP but logs are available at /tmp/{} in {}".format(
tar_file, my_ip)}
return {'status':True, 'statusmessage':'Sent requested files'}
except:
lgr.error(traceback.format_exc())
return {'status':False, 'statusmessage':traceback.format_exc()}
@tejob('te_broadcast_q')
def start_te_dp(resource_config=None, session_config=None, resource_hash=None, session_hash=None, \
client_mgmt_ip=None, traffic_mode=None, traffic_profile=None, stat_dump_interval=None, \
metrics_enabled=False, memory_metrics_enabled=False, uniq_name=None, cpu=None, log_level=None):
#Utility Function Used By createPostFiles()
def isFilePresent(dockerPath, file):
return os.path.exists(dockerPath+file) or os.path.exists(file)
#Utility Function Used By createPostFiles()
def getSizeAndPathOfFile(dockerPath, fileName):
pathInDocker = dockerPath + fileName
sizeOfFile, extn = fileName.split('.')
sizeOfFile, denom = sizeOfFile.split('_')
# truncate doesn't accept B as a valid size
if(denom == 'B'):
return sizeOfFile, pathInDocker
else:
return sizeOfFile + denom, pathInDocker
def get_global_ns_if_available(resconv_sort):
set_of_pointed_if_profiles = set()
set_of_pointed_nses = set()
ns_to_if_profile_mapping = defaultdict(lambda: defaultdict(list))
if_prof_to_vip_idx_mapping = defaultdict(list)
any_default_ns = False
# Make a set of interface profile names that are pointed by various VIP object
counter = 0
for vip_obj in resconv_sort['resource-config']['vip-list']:
if_profile = vip_obj.get('interface-profile', None)
if if_profile is not None:
if_prof_to_vip_idx_mapping[if_profile].append(counter)
set_of_pointed_if_profiles.add(if_profile)
else:
any_default_ns = True
counter += 1
# If there are vips that are not pointing to any profile
# Then by default it is in root's namespace
if(any_default_ns):
set_of_pointed_nses.add("root")
# Parse through the above set, and get the unique n/w namespace names
# Stop the parse and return, if the number of unique n/w namespaces > 1
# If there is no namespace mentioned, it means we got to use 'root' ns, for that profile
for if_profile in set_of_pointed_if_profiles:
list_of_if_objs = resconv_sort['resource-config']['interface-profiles'].get(if_profile)
counter = 0
unique_nses = 0
for if_obj in list_of_if_objs:
ns_name = if_obj.get('ns', 'root')
set_of_pointed_nses.add(ns_name)
if(len(set_of_pointed_nses) > 1):
return None
ns_to_if_profile_mapping[ns_name][if_profile].append(counter)
counter += 1
# If we have only one namespace that is uniquely pointed then pop all the references of that namespace,
# and return True to start the process in that namespace, if the namespace is not `root`
if len(set_of_pointed_nses) == 1:
global_ns = set_of_pointed_nses.pop()
profile_to_if_idx_mapping = ns_to_if_profile_mapping.get(global_ns, None)
if(isinstance(profile_to_if_idx_mapping, dict)):
for profile_name, list_of_if_idx in profile_to_if_idx_mapping.items():
for if_idx in reversed(list_of_if_idx):
resconv_sort['resource-config']['interface-profiles'][profile_name][if_idx].pop('ns', None)
# If the `ns` is popped and if there no `if` field as well, there is no point having the obj
if 'if' not in resconv_sort['resource-config']['interface-profiles'][profile_name][if_idx]:
resconv_sort['resource-config']['interface-profiles'][profile_name].pop(if_idx)
# If by the above process, the list becomes empty, we got to delete all refs of the profile
if(not(bool(resconv_sort['resource-config']['interface-profiles'][profile_name]))):
for vip_idx in if_prof_to_vip_idx_mapping[profile_name]:
resconv_sort['resource-config']['vip-list'][vip_idx].pop('interface-profile')
resconv_sort['resource-config']['interface-profiles'].pop(profile_name)
# If by the above process, the `interface-profiles` becomes empty, we got to delete it
if(not(bool(resconv_sort['resource-config']['interface-profiles']))):
resconv_sort['resource-config'].pop('interface-profiles')
# If the unique namespace is root, then it is not a unique case, but rather a normal te_dp run
if global_ns == 'root':
return None
return global_ns
# More than 1 n/w namespace / ni mention of n/w namespaces
# Nothing fancy has to be done
return None
#Utility Function Used By start_te_dp() to make post files
def createPostFiles(resource_config, dockerPath = "/te_host/"):
'''
> Files name must be <size>_<denomination>.txt
> Floating numbers of size is not allowed (Use lower denominations instead)
> Denominations are:
B ==> Byte
K ==> kiloByte
M ==> megaByte
G ==> gigaByte
'''
try:
post_profiles = resource_config.get('post-profiles',{})
creationSuccess = True
for profile_name, post_list in post_profiles.items():
#Changes the path and saves it!!!
for req in post_list:
file = req.get('file',None)
if file is not None:
if re.match('^\d+_[B,K,M,G]\.txt$',file) is None:
# If file is not of the specified format, it must be already created at /tmp of host
# Else throw an error
if(not(isFilePresent(dockerPath, file))):
lgr.error("Post Files can be created having a name convention of <size>_<B/K/M/G>.txt and so %s not created" %file)
creationSuccess = False
else:
if(os.path.exists(file)):
req['file'] = file
elif(os.path.exists(dockerPath+file)):
req['file'] = dockerPath + file
# If File is present => Skip
elif(isFilePresent(dockerPath,file)):
lgr.debug("Skipping creation of Post File %s as it already exists" %file)
req['file'] = dockerPath + file
# Create File if file format is valid and it doesn't exist
else:
size, pathInDocker = getSizeAndPathOfFile(dockerPath, file)
cmd = "truncate -s " + size + " " + pathInDocker
lgr.info("Making Post File %s with cmd '%s'" %(file, cmd))
(out, err) = __exec_cmd(cmd)
req['file'] = dockerPath + file
if(creationSuccess and 'post-profiles' in resource_config):
resource_config['post-profiles'] = post_profiles
except:
lgr.error("ERROR IN CREATE POST FILES %s" %traceback.format_exc())
creationSuccess = False
return creationSuccess
try:
lgr.info("Start Called")
if resource_config is None or session_config is None or uniq_name is None or cpu is None:
return {'status' : False,
'statusmessage' : 'resource_config (or) session_config (or) uniq_name (or) cpu cannot be None'}
if(log_level is not None):
lgr.setLevel(log_level)
lgr.debug("resource config is %s session config is %s uniq_name is %s CPU %s"% (resource_config, session_config, uniq_name, cpu))
folderpath = os.path.join(LOG_PATH,uniq_name)
# Deleting any existing file/folder of name 'LOG_PATH+uniq_name'
if os.path.exists(folderpath):
lgr.debug( "Folder " + str(folderpath) + "is being removed")
cmd = 'rm -rf ' + str(folderpath)
(out, err) = __exec_cmd(cmd)
if os.path.exists(folderpath):
lgr.error( "Folder %s could not be deleted" %str(folderpath))
# Create a folder of name LOG_PATH+uniq_name and dumping resource-config
os.makedirs(folderpath)
resconv_sort = ast.literal_eval('{ \"resource-config\": ' + str(convert(resource_config)) + '}')
resconv_new = OrderedDict()
resconv = OrderedDict()
# If the all the vips point to the same namespace, then move the process to that namespace
# Valid only for TCP CLIENT
lgr.info("traffic_profile={}, traffic_mode={}".format(traffic_profile, traffic_mode))
if(traffic_profile == "TCP"):
global_ns_name = get_global_ns_if_available(resconv_sort)
if global_ns_name is not None:
resconv_new['global-ns']=global_ns_name
if 'interface-profiles' in resconv_sort['resource-config']:
resconv_new['interface-profiles']=resconv_sort['resource-config']['interface-profiles']
if 'get-profiles' in resconv_sort['resource-config']:
resconv_new['get-profiles']=resconv_sort['resource-config']['get-profiles']
if 'post-profiles' in resconv_sort['resource-config']:
resconv_new['post-profiles']=resconv_sort['resource-config']['post-profiles']
if 'udp-profiles' in resconv_sort['resource-config']:
resconv_new['udp-profiles']=resconv_sort['resource-config']['udp-profiles']
if 'default-get-post-ratio' in resconv_sort['resource-config']:
resconv_new['default-get-post-ratio'] = \
resconv_sort['resource-config']['default-get-post-ratio']
if 'set-cookies-resend' in resconv_sort['resource-config']:
resconv_new['set-cookies-resend'] = \
resconv_sort['resource-config']['set-cookies-resend']
if 'default-download-upload-ratio' in resconv_sort['resource-config']:
resconv_new['default-download-upload-ratio'] = \
resconv_sort['resource-config']['default-download-upload-ratio']
if 'vip-list' in resconv_sort['resource-config']:
resconv_new['vip-list']=resconv_sort['resource-config']['vip-list']
if 'port-list' in resconv_sort['resource-config']:
resconv_new['port-list']=resconv_sort['resource-config']['port-list']
if 'port-range' in resconv_sort['resource-config']:
resconv_new['port-range']=resconv_sort['resource-config']['port-range']
######### TCP PARAMS
if 'vip-selection-rr' in resconv_sort['resource-config']:
resconv_new['vip-selection-rr']=resconv_sort['resource-config']['vip-selection-rr']
if 'log-level' in resconv_sort['resource-config']:
resconv_new['log-level']=resconv_sort['resource-config']['log-level']
if 'tcp-keepalive-timeout' in resconv_sort['resource-config']:
resconv_new['tcp-keepalive-timeout']=resconv_sort['resource-config']['tcp-keepalive-timeout']
if 'tcp-connect-timeout' in resconv_sort['resource-config']:
resconv_new['tcp-connect-timeout']=resconv_sort['resource-config']['tcp-connect-timeout']
if 'disable-tcp-nagle' in resconv_sort['resource-config']:
resconv_new['disable-tcp-nagle']=resconv_sort['resource-config']['disable-tcp-nagle']
if 'http-version' in resconv_sort['resource-config']:
resconv_new['http-version']=resconv_sort['resource-config']['http-version']
if 'ssl-version' in resconv_sort['resource-config']:
resconv_new['ssl-version']=resconv_sort['resource-config']['ssl-version']
if 'ssl-groups' in resconv_sort['resource-config']:
resconv_new['ssl-groups']=resconv_sort['resource-config']['ssl-groups']
if 'cipher-suites' in resconv_sort['resource-config']:
resconv_new['cipher-suites']=resconv_sort['resource-config']['cipher-suites']
if 'ssl-session-reuse' in resconv_sort['resource-config']:
resconv_new['ssl-session-reuse']=resconv_sort['resource-config']['ssl-session-reuse']
if 'http-pipeline' in resconv_sort['resource-config']:
resconv_new['http-pipeline']=resconv_sort['resource-config']['http-pipeline']
if 'send-tcp-resets' in resconv_sort['resource-config']:
resconv_new['send-tcp-resets']=resconv_sort['resource-config']['send-tcp-resets']
"""
# Unsupported knobs as of today
# Lot of the knobs can be used to simulate attacks
if 'pipelen' in resconv_sort['resource-config']:
resconv_new['pipelen']=resconv_sort['resource-config']['pipelen']
if 'connect-only' in resconv_sort['resource-config']:
resconv_new['connect-only']=resconv_sort['resource-config']['connect-only']
if 'socket-linger' in resconv_sort['resource-config']:
resconv_new['socket-linger']=resconv_sort['resource-config']['socket-linger']
if 'enable-addr-reuse' in resconv_sort['resource-config']:
resconv_new['enable-addr-reuse']=resconv_sort['resource-config']['enable-addr-reuse']
if 'tcp-fastopen' in resconv_sort['resource-config']:
resconv_new['tcp-fastopen']=resconv_sort['resource-config']['tcp-fastopen']
if 'tcp-noclose' in resconv_sort['resource-config']:
resconv_new['tcp-noclose']=resconv_sort['resource-config']['tcp-noclose']
if 'app-timeout' in resconv_sort['resource-config']:
resconv_new['app-timeout']=resconv_sort['resource-config']['app-timeout']
if 'http-keep-alives' in resconv_sort['resource-config']:
resconv_new['http-keep-alives']=resconv_sort['resource-config']['http-keep-alives']
"""
resconv_new['log-path']=LOG_PATH_TE_DP
resconv['resource-config']=resconv_new
#Creates postfiles if not present in /te_host/ of the docker
#If the file has to be created on the fly, the naming conventions is "<size>_<denomination - B,K,M,G>.txt" (No decimal values are allowed)
#Path must not be mentioned for on the fly creation.
#If file is already present, the absolute path to the file must be given .
isPostFilesCreated = createPostFiles(resconv['resource-config'])
# Write the configs
resourceFile = os.path.join(folderpath,'resourceConfig.json')
fd_res = open(resourceFile, 'w')
fd_res.write(json.dumps(resconv,indent=3))
fd_res.close()
if(bool(session_config)):
sesconv = ast.literal_eval('{ \"session-config\": [' + str(convert(session_config)) + '] }')
sessionFile = os.path.join(folderpath,'sessionConfig.json')
fd_ses = open(sessionFile, 'w')
fd_ses.write(json.dumps(sesconv,indent=3))
fd_ses.close()
else:
sessionFile = None
result = {}
if(not(isPostFilesCreated)):
result['status'] = False
result['statusmessage'] = 'Unable to create post files'
result['result'] = None
return result
result['cpu_result'] = {}
# Get the TE_DP's info
LaunchPassed = False
#General command to start te_dp
"""
bin/te_dp -a CLIENT/SERVER -p TCP/UDP -i <client_ip> -c <cpu> \
-r resource_config -j resource_config's hash \
-s session_config -k session_config's hash \
-d stats_dump_interval -m(Optional to enable metrics) -t(Optional to memory metrics)
"""
if(bool(sessionFile)):
cmd = """nohup taskset -c {} /opt/te/bin/te_dp -a {} -p {} -i {} -c {} -r {} -j {} \
-s {} -k {}""".format(cpu, traffic_mode, traffic_profile, client_mgmt_ip, \
cpu, resourceFile, resource_hash, sessionFile, session_hash)
else:
cmd = "nohup taskset -c {} /opt/te/bin/te_dp -a {} -p {} -i {} -c {} -r {} -j {}".format(
cpu, traffic_mode, traffic_profile, client_mgmt_ip, cpu, resourceFile, resource_hash)
if(stat_dump_interval != 0):
if(memory_metrics_enabled):
cmd += " -t"
if(metrics_enabled):
cmd += " -m"
cmd += " -d {}".format(stat_dump_interval)
cmd += " > /dev/null & echo $!"
lgr.info("Starting TE_DP process using cmd='%s'" %cmd)
(out, err) = __exec_cmd(cmd, stderr=False)
try:
pid = int(out)
LaunchPassed = True
except:
pass
if LaunchPassed:
result['pid'] = pid
result['status'] = True
result['statusmessage'] = "Started TEDP in cpu=%d" %cpu
lgr.info("Launched the TEDP Process PID and TE_WORK's %d "%(pid))
lgr.info("Start Succeeded on cpu=%d pid=%d" %(cpu, pid))
if(stat_dump_interval != 0):
msg_q = SysVQ(1)
msg_q.send(str(pid))
return result
else:
result['status'] = False
result['statusmessage'] = "Unable to start te_dp in cpu={}".format(cpu)
lgr.info("Start Failed on cpu=%d" %cpu)
return result
except:
        lgr.error( 'Exception Occurred , Trace : %s' %traceback.format_exc() )
result = {
'status' : False,
            'statusmessage' : 'Exception Occurred , Trace : %s' %traceback.format_exc()
}
return result
def checkRequestValidity(pid):
# Check if all necessary params are given
if pid is None:
return {'status' : False,
'statusmessage' : "pid=" + str(pid) + " cannot be None"}
# Check if the te_dp process is actually running
tedp_alive = len(os.popen('ps -p '+str(pid)+'| grep te_dp').read())
if not tedp_alive:
return {'status' : False,
'statusmessage' : 'TEDP not alive'}
# If all Requests pass
return None
def remove_queue(q_id):
cmd = 'ipcrm -Q ' + str(q_id)
(out, err) = __exec_cmd(cmd)
def stop_te_dp_helper(pid):
#Tries to do a stop
#Does a soft kill
#Checks if the process is kill and queues are removed
#Else does a hard kill and removes the queues
def kill_process(sigID):
'''
Args:
sigID: Signal used to kill the process
Use signal 1(or)2 for softkill
Use signal 9 for hard kill
'''
cmd = 'kill -' + str(sigID) + ' ' + str(pid)
(out, err) = __exec_cmd(cmd)
def isProcessAlive():
return len(os.popen('ps -p '+str(pid)+'| grep te_dp').read())
#Soft Kill
#Make Queues and stop te_dp process
kill_process(signal.SIGINT) #Soft kill
time.sleep(2)
if(isProcessAlive()):
#Hard Kill
kill_process(9)
remove_queue(ftok("/tmp", pid, True))
time.sleep(2)
return not(isProcessAlive())
@tejob('te_broadcast_q')
def stop_te_dp(pid=None, uniq_name='TE_DP'):
try:
lgr.info("Stop Called")
isValidRequest = checkRequestValidity(pid)
if isValidRequest is not None:
lgr.warning("Dp process had already crashed %s" %str(pid))
if pid is not None:
remove_queue(ftok("/tmp", pid, True))
return {
'status' : True,
'statusmessage' : 'Dp process had already crashed'
}
#If 0 is returned, then no process of passed pid exist
if(stop_te_dp_helper(pid)):
lgr.info("Stop Succeeded %d" %pid)
return {
'status' : True,
'statusmessage' : 'te_dp of PID=' + str(pid) + ' is stopped',
'uniq-name' : uniq_name,
}
else:
lgr.info("Stop Failed %s" %str(pid))
return {
'status' : False,
'statusmessage' : 'Unable to kill the process %s' %str(pid)
}
except:
return_dict = {'status' : False,
'statusmessage' : 'Exception: %s' %traceback.format_exc()}
return return_dict
@tejob('te_broadcast_q')
def raw_update_te_dp(resource_config=None, session_config=None, resource_hash=None, session_hash=None, \
client_mgmt_ip=None, traffic_mode=None, traffic_profile=None, stat_dump_interval=None, \
metrics_enabled=False, memory_metrics_enabled=False, uniq_name=None, pid=None, cpu=None, log_level=None):
try:
lgr.info("Update Called")
stop_return_dict = stop_te_dp(pid=pid, uniq_name=uniq_name)
if(stop_return_dict['status'] == False):
lgr.info("Update's Stop Failed")
return stop_return_dict
start_return_dict = start_te_dp(resource_config=resource_config, session_config=session_config, \
resource_hash=resource_hash, session_hash=session_hash, \
client_mgmt_ip=client_mgmt_ip, traffic_mode=traffic_mode, \
traffic_profile=traffic_profile, stat_dump_interval=stat_dump_interval, \
metrics_enabled=metrics_enabled, memory_metrics_enabled=memory_metrics_enabled, \
uniq_name=uniq_name, cpu=cpu, log_level=log_level)
if(start_return_dict['status'] == False):
lgr.info("Update's Start Failed")
return start_return_dict
lgr.info("Update Suceeded")
return start_return_dict
except Exception as e:
return {'status' : False,
'statusmessage' : 'Exception: %s' %traceback.format_exc()}
| [
"[email protected]"
] | |
1735e4d9e099a3ed0ac6f2fe061c8bc8bc749db7 | 8f50c262f89d3dc4f15f2f67eb76e686b8f808f5 | /Calorimeter/CaloLocalHadCalib/share/cl_w_ooc.py | 7823a3efcffe358efc4b792501be52620786ff39 | [
"Apache-2.0"
] | permissive | strigazi/athena | 2d099e6aab4a94ab8b636ae681736da4e13ac5c9 | 354f92551294f7be678aebcd7b9d67d2c4448176 | refs/heads/master | 2022-12-09T02:05:30.632208 | 2020-09-03T14:03:18 | 2020-09-03T14:03:18 | 292,587,480 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,746 | py | # This is the job options file to create classification, weighting and
# out-of-cluster histos from ESD
###############################################################
#
# Job options file
#
# Authors: Sven Menke
#==============================================================
doClassification=True
doWeighting=True
doOutOfCluster=True
doDeadMaterial=True
if not 'ClassificationType' in dir():
ClassificationType="None"
#ClassificationType="ParticleID_HAD"
if not 'inFileName' in dir():
inFileName=['ESD.371530._000247.pool.root.1']
if not 'outFileNameLCC' in dir():
outFileNameLCC='classify.root'
#outFileNameLCC=ClassificationType+'_'+'classify.root'
if not 'outFileNameLCW' in dir():
outFileNameLCW='weights.root'
#outFileNameLCW=ClassificationType+'_'+'weights.root'
if not 'outFileNameLCO' in dir():
outFileNameLCO='ooc.root'
#outFileNameLCO=ClassificationType+'_'+'ooc.root'
if not 'outFileNameLCDM' in dir():
outFileNameLCDM='dmc.root'
#outFileNameLCDM=ClassificationType+'_'+'DeadMaterialTree.root'
#DetDescrVersion = 'ATLAS-GEO-08-00-01'
#DetDescrVersion = 'ATLAS-GEO-16-00-00'
DetDescrVersion = 'ATLAS-GEO-18-01-00'
include("RecExCond/AllDet_detDescr.py")
import AthenaCommon.Constants as Lvl
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from AthenaCommon.AppMgr import theApp
## load POOL support
import AthenaPoolCnvSvc.ReadAthenaPool
## general job configuration
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
#from AthenaCommon.BeamFlags import jobproperties
#jobproperties.Beam.numberOfCollisions = 8.0
## configure object key store to recognize calo cells
from RecExConfig.ObjKeyStore import ObjKeyStore, objKeyStore
oks = ObjKeyStore()
oks.addStreamESD('CaloCellContainer', ['AllCalo'] )
## re-do topo clusters on EM scale
from CaloRec.CaloTopoClusterFlags import jobproperties
jobproperties.CaloTopoClusterFlags.doTopoClusterLocalCalib = False
jobproperties.CaloTopoClusterFlags.doCellWeightCalib = False
from CaloRec.CaloClusterTopoGetter import CaloClusterTopoGetter
CaloClusterTopoGetter()
topSequence.CaloTopoCluster.TopoCalibMoments.MomentsNames += ["ENG_CALIB_OUT_L","ENG_CALIB_FRAC_EM"]
topSequence.CaloTopoCluster.TopoCalibMoments.MatchDmType = 1 # 1=loose, 2=medium (default), 3=tight
theApp.EvtMax = -1
#--------------------------------------------------------------
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
#--------------------------------------------------------------
MessageSvc = svcMgr.MessageSvc
MessageSvc.OutputLevel = Lvl.INFO
MessageSvc.infoLimit = 1000000
svcMgr.EventSelector.InputCollections = inFileName
if not 'doClassification' in dir():
doClassification=False
pass
if not 'doWeighting' in dir():
doWeighting=False
pass
if not 'doOutOfCluster' in dir():
doOutOfCluster=False
pass
if not 'doDeadMaterial' in dir():
doDeadMaterial=False
pass
if doClassification:
include ("CaloLocalHadCalib/GetLCClassification_jobOptions.py")
GetLCC.ClassificationType=ClassificationType
pass
if doWeighting:
include ("CaloLocalHadCalib/GetLCWeights_jobOptions.py")
GetLCW.ClassificationType=ClassificationType
pass
if doOutOfCluster:
include ("CaloLocalHadCalib/GetLCOutOfCluster_jobOptions.py")
GetLCO.ClassificationType=ClassificationType
pass
if doDeadMaterial:
include ("CaloLocalHadCalib/GetLCDeadMaterialTree_jobOptions.py")
pass
print topSequence
ChronoStatSvc = Service( "ChronoStatSvc" )
ChronoStatSvc.ChronoDestinationCout = True
ChronoStatSvc.PrintUserTime = True
ChronoStatSvc.PrintSystemTime = True
ChronoStatSvc.PrintEllapsedTime = True
AthenaPoolCnvSvc = Service( "AthenaPoolCnvSvc" )
AthenaPoolCnvSvc.UseDetailChronoStat = True
| [
"[email protected]"
] | |
347234d5055003262155ee4a4c48189cba112bad | 5060506edf8498a2c22e8f17ae6372bed72e9e0a | /021.py | e2bf4c616b6f843c2a804d03a1b2963a021d7ef4 | [] | no_license | danalenvargas/project-euler-solutions | d53d0ffdb6e882284e29130ebb4feaa60032806a | 9d98689575f36ce5e304dc747bbfa70f6ec54582 | refs/heads/master | 2020-03-22T17:21:52.854462 | 2018-07-10T07:08:43 | 2018-07-10T07:08:43 | 140,391,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import math
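# Project Euler 21: sum all amicable numbers below 10000 (d(a) = b and d(b) = a with a != b).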
divSums = [0] * 10000
def getDivSum(num):
if num != 1 and divSums[num] == 0:
sum = 1
for i in range(2, math.floor(math.sqrt(num)) + 1):
if num%i == 0:
if num/i == i:
sum += i
else:
sum += i + num//i
divSums[num] = sum
return divSums[num]
def compute():
sum = 0
for i in range(2, 10000):
if getDivSum(i) < 10000 and i == getDivSum(divSums[i]) and i != divSums[i]:
sum += i
return sum
print(compute()) | [
"[email protected]"
] | |
f7928221cd822cfdd64a619751fc7103bef0c32a | be0c1b89e5f97f393dbaeffe587069f4a8e7ac09 | /sandbox/tests.py | 9587e8fa22155e2d117abcd144e0f810df8cf63e | [] | no_license | taozerui/DiVAE | 0430b2330bce05e0c0233bad0efb50126f68653c | 971087d9c9554ce11e22b11036d1194747fb5839 | refs/heads/master | 2023-07-09T21:38:17.832196 | 2021-06-22T15:08:27 | 2021-06-22T15:08:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py |
l=[5,6,7,8]
#01
#12
#23
#34
for i in range(0,len(l)):
print(i,i+1)
| [
"[email protected]"
] | |
4cc9e8278598a8c311ed01977bd5a0f2694fc8c5 | f8f8c8b172c8887e984b8789fd6dfc6489d51de7 | /vertices.py | db5265d3808ab4443b8ace17485a335ff26c3618 | [] | no_license | konlil/mypy | bda9c1f24fc42d145dec5e7b94966c7194d77706 | c6583450279bfd7a21b30ee523b4f55c56cb0599 | refs/heads/master | 2016-09-05T16:04:33.906696 | 2014-07-12T09:06:51 | 2014-07-12T09:06:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,583 | py | #coding: gbk
import struct
'''
Format C Type Python type Standard size Notes
x pad byte no value
c char string of length 1 1
b signed char integer 1 (3)
B unsigned char integer 1 (3)
? _Bool bool 1 (1)
h short integer 2 (3)
H unsigned short integer 2 (3)
i int integer 4 (3)
I unsigned int integer 4 (3)
l long integer 4 (3)
L unsigned long integer 4 (3)
q long long integer 8 (2), (3)
Q unsigned long long integer 8 (2), (3)
f float float 4 (4)
d double float 8 (4)
s char[] string
p char[] string
P void * integer (5), (3)
'''
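# Illustrative usage (not part of the original module): each vertex class below pairs a struct
# FORMAT string with its SIZE in bytes, so one vertex can be decoded with, for example,
#   t = struct.unpack(vt_XYZNUV.FORMAT, vertices[seg0:seg0 + vt_XYZNUV.SIZE])
#   v = vt_XYZNUV(t)   # exposes v.pos, v.normal and v.uv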
class vt_baseType(object):
FORMAT = ''
SIZE = 0
def __init__(self, t):
self.__tuple = t
def __str__(self):
return str(self.__tuple)
class vt_XYZNUVTB(vt_baseType):
FORMAT = "3fI2fII"
SIZE = 32 #bytes
def __init__(self, t):
super(vt_XYZNUVTB, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = t[3]
self.uv = (t[4], t[5])
self.tangent = t[6]
self.binormal = t[7]
class vt_XYZNUV2(vt_baseType):
FORMAT = "3f3f2f2f"
SIZE = 40 #bytes
def __init__(self, t):
super(vt_XYZNUV2, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = (t[3], t[4], t[5])
self.uv = (t[6], t[7])
self.uv2 = (t[8], t[9])
class vt_XYZNUV(vt_baseType):
FORMAT = "3f3f2f"
SIZE = 32 #bytes
def __init__(self, t):
super(vt_XYZNUV, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = (t[3], t[4], t[5])
self.uv = (t[6], t[7])
class vt_XYZNDUV(vt_baseType):
FORMAT = "3f3fI2f"
SIZE = 36 #bytes
def __init__(self, t):
super(vt_XYZNDUV, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = (t[3], t[4], t[5])
self.color = t[6]
self.uv = (t[7], t[8])
class vt_XYZNUV2TB(vt_baseType):
FORMAT = "3fi2f2fII"
SIZE = 40 #bytes
def __init__(self, t):
super(vt_XYZNUV2TB, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = t[3]
self.uv = (t[4], t[5])
self.uv2 = (t[6], t[7])
self.tangent = t[8]
self.binormal = t[9]
class vt_XYZNUVIIIWW(vt_baseType):
FORMAT = "3fI2f5B"
SIZE = 29 #bytes
def __init__(self, t):
super(vt_XYZNUVIIIWW, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = t[3]
self.uv = (t[4], t[5])
self.index = t[6]
self.index2 = t[7]
self.index3 = t[8]
self.weight = t[9]
self.weight2 = t[10]
class vt_XYZNUVIIIWWTB(vt_baseType):
FORMAT = "=3fI2f5BII"
SIZE = 37 #bytes
def __init__(self, t):
super(vt_XYZNUVIIIWWTB, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = t[3]
self.uv = (t[4], t[5])
self.index = t[6]
self.index2 = t[7]
self.index3 = t[8]
self.weight = t[9]
self.weight2 = t[10]
self.tangent = t[11]
self.binormal = t[12]
class vt_XYZNUVITB(vt_baseType):
FORMAT = "3fI2ffII"
SIZE = 36 #bytes
def __init__(self, t):
super(vt_XYZNUVITB, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = t[3]
self.uv = (t[4], t[5])
self.index = t[6]
self.tangent = t[7]
self.binormal = t[8]
class vt_XYZNUVI(vt_baseType):
FORMAT = "3f3f2ff"
SIZE = 36 #bytes
def __init__(self, t):
super(vt_XYZNUVI, self).__init__(t)
self.pos = (t[0], t[1], t[2])
self.normal = (t[3], t[4], t[5])
self.uv = (t[6], t[7])
self.index = t[8]
class VertexHeader(object):
FORMAT = "64si"
SIZE = 68
def __init__(self):
self.vertexFormat = '' # 64 char
self.verticesCount = 0
class Vertices(object):
def __init__(self):
self.vertexStride = 0
self.vertices = []
self.vertexPositions = []
def load(self, res):
self.res = res
import resmgrdll
import ResMgr
print 'load vertices:', res
noff = self.res.find('.primitives/')
if noff < 0:
raise Exception
noff += 11
fileName = self.res[:noff]
tagName = self.res[noff+1:]
primFile = ResMgr.openSection(fileName)
print 'open primitive', fileName
if not primFile:
raise Exception, 'load vertices failed, %s' % fileName
print 'read vertices:', tagName
vertices = primFile.openSection(tagName)
vertices = bytes(vertices.asBinary)
if vertices:
vh = VertexHeader()
seg0 = 0
seg1 = VertexHeader.SIZE
vh.vertexFormat, vh.verticesCount = struct.unpack(VertexHeader.FORMAT, vertices[seg0:seg1])
vh.vertexFormat = vh.vertexFormat.split('\0')[0]
print vh.vertexFormat, vh.verticesCount
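			# unpack every vertex record according to the declared vertex format string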
if vh.vertexFormat == 'xyznuvtb':
self.vertexStride = vt_XYZNUVTB.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNUVTB.SIZE
vertex = vt_XYZNUVTB( struct.unpack(vt_XYZNUVTB.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
#print vertex
elif vh.vertexFormat == 'xyznuv2':
self.vertexStride = vt_XYZNUV2.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNUV2.SIZE
vertex = vt_XYZNUV2( struct.unpack(vt_XYZNUV2.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
elif vh.vertexFormat == 'xyznuv':
self.vertexStride = vt_XYZNUV.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNUV.SIZE
vertex = vt_XYZNUV( struct.unpack(vt_XYZNUV.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
elif vh.vertexFormat == 'xyznduv':
self.vertexStride = vt_XYZNDUV.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNDUV.SIZE
vertex = vt_XYZNDUV( struct.unpack(vt_XYZNDUV.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
elif vh.vertexFormat == 'xyznuv2tb':
self.vertexStride = vt_XYZNUV2TB.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNUV2TB.SIZE
vertex = vt_XYZNUV2TB( struct.unpack(vt_XYZNUV2TB.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
elif vh.vertexFormat == 'xyznuviiiww':
self.vertexStride = vt_XYZNUVIIIWW.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNUVIIIWW.SIZE
vertex = vt_XYZNUVIIIWW( struct.unpack(vt_XYZNUVIIIWW.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
elif vh.vertexFormat == 'xyznuviiiwwtb':
self.vertexStride = vt_XYZNUVIIIWWTB.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNUVIIIWWTB.SIZE
vertex = vt_XYZNUVIIIWWTB( struct.unpack(vt_XYZNUVIIIWWTB.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
#print vertex
elif vh.vertexFormat == 'xyznuvitb':
self.vertexStride = vt_XYZNUVITB.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNUVITB.SIZE
vertex = vt_XYZNUVITB( struct.unpack(vt_XYZNUVITB.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
elif vh.vertexFormat == 'xyznuvi':
self.vertexStride = vt_XYZNUVI.SIZE
for i in xrange(vh.verticesCount):
seg0 = seg1
seg1 = seg0 + vt_XYZNUVI.SIZE
vertex = vt_XYZNUVI( struct.unpack(vt_XYZNUVI.FORMAT, vertices[seg0:seg1]) )
self.vertices.append(vertex)
self.vertexPositions.append(vertex.pos)
else:
raise Exception, "Failed to recognise vertex format: %s" % vh.vertexFormat
| [
"[email protected]"
] | |
07d26a4f614d347f645b65a8d343f8cf14e41985 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /CHwX2o6rqrBsL4gzr_8.py | 4e06235d8f0753cd62c7b0b2ac07732e5ae46576 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """
Check if a string `txt` is a title text or not. A title text is one which has
all the words in the text start with an upper case letter.
### Examples
check_title("A Mind Boggling Achievement") ➞ True
check_title("A Simple Python Program!") ➞ True
check_title("Water is transparent") ➞ False
### Notes
N/A
"""
def check_title(txt):
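    # str.title() capitalises the first letter of every word, so the text passes only if it is already in title case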
    return txt.title() == txt
| [
"[email protected]"
] | |
0d0e5d82f76a75f1d2ae152b1275ee6388d0044f | a14dd601cde67f67d0ba38dfd1362f7c0109cef1 | /stacks/concept/increasing.py | 3dafd3ba680c63bc1712d971b17d5f054734cfd8 | [] | no_license | Meaha7/dsa | d5ea1615f05dae32671af1f1c112f0c759056473 | fa80219ff8a6f4429fcf104310f4169d007af712 | refs/heads/main | 2023-09-03T18:52:41.950294 | 2021-11-05T09:14:42 | 2021-11-05T09:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # T=n,S=n
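# Both functions keep a monotonically non-decreasing stack: x() stores the values themselves, y() stores their indices.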
def x(nums):
n, stack = len(nums), []
for i in range(n):
while stack and stack[-1] > nums[i]:
stack.pop()
stack.append(nums[i])
return stack
# T=n,S=n
def y(nums):
n, stack = len(nums), []
for i in range(n):
while stack and nums[stack[-1]] > nums[i]:
stack.pop()
stack.append(i)
return stack
for nums in [
[7, 6, 6, 5, 5, 4, 3, 2, 1],
[7, 6, 6, 5, 5, 4, 3, 8, 2, 1],
[5, 4, 3, 2, 1],
[1, 2, 3, 4, 5]
]:
print(x(nums), end=' ')
print(y(nums))
| [
"[email protected]"
] | |
2346a86e6c9480baef5bf11e5f21294dcbac2737 | 905b3a6a7323ab7dba65b4dc8ddeffc91981c04c | /Práticas/RE12/wc.py | 044747e863abbbd7ad4c2c30add77a83da446beb | [] | no_license | TitanicThompson1/FPRO | ada8d1e23a9e17bfafcd01431665bda54476de50 | af2399c1f0833bd34e149c64ab62b8ba8a4605ea | refs/heads/master | 2020-05-07T18:45:13.991996 | 2019-10-18T14:26:13 | 2019-10-18T14:26:13 | 162,582,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 20 09:13:48 2018
@author: up201706860
"""
def wc(filename):
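    # returns the tuple (line count, word count, character count) for the given file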
result=()
nlines=0
ncharac=0
nwords=0
with open(filename,"r") as shakeit:
for line in shakeit:
nlines+=1
ncharac+=len(line)
oline=line.split()
nwords+=len(oline)
result=(nlines,nwords,ncharac)
return result
| [
"[email protected]"
] | |
9ea44e90d899ad74bce850db433f5ba17e10e09d | 646ea2c2096ed3e0b763eb69f0c0f1a367a7092d | /lecture_07/Ex_07/solid/autocorrelation_u.py | 315d673c1b0f83239f5045625a69ea3b8d40dd31 | [] | no_license | GiovanniPaleari/LSN_Exercises | 2d13fd8692b5b7dfaf7647bb20e4f44a4413da00 | e8446e3058d689768c00c2d96fb1b5239e944199 | refs/heads/master | 2020-06-22T07:21:50.344559 | 2019-07-18T23:55:54 | 2019-07-18T23:55:54 | 197,669,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from scipy.optimize import curve_fit
def f(x,a,b):
return a * np.exp(-b*x)
step, ene = np.loadtxt("inst_energy_solid.txt", usecols=(0, 1), delimiter='\t', unpack=True)
tmax=300
c1=0
c2=0
autocorrelation = np.zeros(tmax)
for t in range(0,100000):
c1 += ene[t]**2
c2 +=ene[t]
sigma_2 = 1/len(step)*c1 - (1/len(step)*c2)**2
#print(sigma_2)
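# normalized autocorrelation estimate: C(t) = (<E(s)E(s+t)> - <E(s)><E(s+t)>) / sigma_2, averaged over the available window of length len(step)-t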
for t in tqdm(range(0,tmax)):
c3=0
c4=0
c5=0
delta=len(step)-t
for tt in range(0,delta):
c3 += ene[tt]*ene[tt+t]
c4 += ene[tt]
c5 += ene[tt+t]
autocorrelation[t] = (1/delta*c3 -1/(delta**2)*c4*c5)/sigma_2
#print(t)
#print(autocorrelation[t])
#print('\t')
x = np.arange(tmax)
#print(autocorrelation)
plt.plot(x, autocorrelation)
p_opt, p_cov = curve_fit(f, x, autocorrelation) #, bounds=([0,0,-1],[2,3,+3]))
y_fit = f(x,p_opt[0],p_opt[1])
plt.plot(x,y_fit) # plotting fitted function
print("optimized parameters [a,b] =")
print(p_opt)
print("parameters uncertainty =")
print(np.sqrt(np.diagonal(p_cov)))
tc = 1/p_opt[1]
print("Correlation time = ", tc)
plt.title('Energy Autocorrelation - Solid')
plt.xlabel('Monte Carlo Steps')
plt.ylabel('Autocorrelation')
plt.show()
| [
"[email protected]"
] | |
8b79f003dfce4ea13409253d4b77df4f94132382 | f8a9ddee8dfecad878a4b99e968c89386dd44408 | /api/app/models/filmes_assistidos.py | f1e479e364211321b6d858414dfb8874fe01eaf6 | [] | no_license | cayque16/Gerencia-Filmes-API | e62131f3c8985646864290ff5c9f538c70237cd9 | ac40d63ea00e63935cd32b884c2c1fb02e26baa3 | refs/heads/master | 2022-07-20T22:01:57.403595 | 2020-06-12T13:46:09 | 2020-06-12T13:46:09 | 147,935,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from app import db,ma
class FilmesAssistidos(db.Model):
__tablename__ = 'FilmesAssistidos'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
idFilme = db.Column(db.Integer,db.ForeignKey('Filmes.id'), nullable=False)
idUsuario = db.Column(db.Integer,db.ForeignKey('Usuarios.id'))
idAnoMeta = db.Column(db.Integer,db.ForeignKey('Usuarios.id'))
posAno = db.Column(db.Integer, nullable=False)
data = db.Column(db.DateTime)
inedito = db.Column(db.Integer)
def __init__(self,idFilme,idUsuario,idAnoMeta,posAno,data,inedito):
self.idFilme = idFilme
self.idUsuario = idUsuario
self.idAnoMeta = idAnoMeta
self.posAno = posAno
self.data = data
self.inedito = inedito
class FilmesAssistidosSchema(ma.Schema):
class Meta:
fields = ('id','idFilme','idUsuario','idAnoMeta','posAno','data','inedito')
assistido_schema = FilmesAssistidosSchema()
assistidos_schema = FilmesAssistidosSchema(many=True)
| [
"[email protected]"
] | |
f5ae17a47db14304ae7b9412ecb72093ce8142ba | 7d13216ee8fb57fb12aa36be6b32c9b5523f62f9 | /blog/models.py | 9f9a1223131e680bb74fbe21d2b7c289c86b2486 | [] | no_license | bayobit/portfolio-project | 18104625f00ae564de6070a3828123a60920949f | bb9b26fe5658d54901ef239ff51b8095b3051764 | refs/heads/master | 2022-12-17T02:24:46.364712 | 2020-04-03T11:07:15 | 2020-04-03T11:07:15 | 248,832,657 | 0 | 0 | null | 2022-12-08T03:05:14 | 2020-03-20T19:07:49 | Python | UTF-8 | Python | false | false | 566 | py | from django.db import models
# Create your models here.
class Blog(models.Model):
title = models.CharField(max_length = 200)
pubdate = models.DateTimeField(null = True, blank = True)
body = models.TextField(default = 'DEFAULT VALUE', blank = True, null=True)
image = models.ImageField(upload_to = 'images/')
def __str__(self): #display blog title in the admin page
return self.title
def summary(self):
return self.body[:100]
def pubdate_pretty(self):
return self.pubdate.strftime('%b %e %Y')
| [
"[email protected]"
] | |
f1584693b2003255098fa425e3b45a459f8e13fd | 4f2bbd19c228ac7b10e3e8d2ebddbabef677dd0f | /provided_code2.py | 57ac8c6e967bc0ac51a6c520df9878e48f245141 | [] | no_license | validum/Project-2 | a80cb8c287934fa506519ed39e8b6f7148cee6f2 | 263c20ae0190c2fc4be8ba7a6328f687901178bc | refs/heads/master | 2016-09-06T14:59:35.408955 | 2014-09-29T00:13:48 | 2014-09-29T04:25:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 20 01:26:22 2014
@author: Eric
"""
"""
Provided code for Application portion of Module 2
"""
# general imports
import urllib2
# CodeSkulptor import
#import simpleplot
#import codeskulptor
#codeskulptor.set_timeout(60)
# Desktop imports
#import matplotlib.pyplot as plt
############################################
# Provided code
def copy_graph(graph):
"""
Make a copy of a graph
"""
new_graph = {}
for node in graph:
new_graph[node] = set(graph[node])
return new_graph
def delete_node(ugraph, node):
"""
Delete a node from an undirected graph
"""
neighbors = ugraph[node]
ugraph.pop(node)
for neighbor in neighbors:
ugraph[neighbor].remove(node)
def targeted_order(ugraph):
"""
Compute a targeted attack order consisting
of nodes of maximal degree
Returns:
A list of nodes
"""
# copy the graph
new_graph = copy_graph(ugraph)
order = []
while len(new_graph) > 0:
max_degree = -1
for node in new_graph:
if len(new_graph[node]) > max_degree:
max_degree = len(new_graph[node])
max_degree_node = node
neighbors = new_graph[max_degree_node]
new_graph.pop(max_degree_node)
for neighbor in neighbors:
new_graph[neighbor].remove(max_degree_node)
order.append(max_degree_node)
return order
##########################################################
# Code for loading computer network graph
NETWORK_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_rf7.txt"
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = urllib2.urlopen(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = graph_lines[ : -1]
print "Loaded graph with", len(graph_lines), "nodes"
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
answer_graph[node].add(int(neighbor))
return answer_graph
| [
"[email protected]"
] | |
ee029148e28c59ea905123f4f6cda00447ed9b81 | 25c24c307853f1fa19238801ca92b64864d0d7aa | /analysis/compute_run_time.py | 37afbee37cb698be6c88402ab5cc0ef12e4369f7 | [] | no_license | maschimax/HPO-benchmarking-framework | 0aa179a4d7d62065c756a5c8a3b8de073c47c1dd | 4d130dee4f9fac37364e5cc8c474a89904860ac8 | refs/heads/master | 2023-06-12T09:33:43.266104 | 2021-07-05T15:10:23 | 2021-07-05T15:10:23 | 294,675,140 | 1 | 2 | null | 2021-07-05T15:10:23 | 2020-09-11T11:17:48 | Python | UTF-8 | Python | false | false | 1,580 | py | import pandas as pd
import os
import numpy as np
total_computing_time = 0
os.chdir('..')
start_path = './hpo_framework/results'
datasets = ['turbofan', 'scania', 'sensor', 'blisk', 'surface']
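# walk every data set folder, open each benchmark log and add up the largest timestamp of every run (its total run time)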
for _, data_dirs, _ in os.walk(start_path):
for this_dir in data_dirs:
if this_dir not in datasets:
continue
dataset = this_dir
run_time_this_set = 0
for _, _, files in os.walk(os.path.join(start_path, this_dir)):
for this_file in files:
if 'logs_' in this_file:
print('Reading: ', this_file)
this_log = pd.read_csv(os.path.join(start_path, this_dir, this_file))
for run in this_log['Run-ID'].unique():
run_time = this_log.loc[(this_log['Run-ID'] == run), 'timestamps'].max()
# print('Time of %s: %f' % (run, run_time))
if np.isnan(run_time):
continue
run_time_this_set += run_time
total_computing_time += run_time
print('Run time on %s data set [s]: %f' % (dataset, run_time_this_set))
print('Total benchmarking time: %f [s]' % total_computing_time)
print('Total benchmarking time: %f [min]' % (total_computing_time/60))
print('Total benchmarking time: %f [h]' % (total_computing_time/60/60))
print('Total benchmarking time: %f [d]' % (total_computing_time/60/60/24))
print('Total benchmarking time: %f [w]' % (total_computing_time/60/60/24/7))
| [
"[email protected]"
] | |
74433f5854149b11a48817cd9fe2f5498c072f15 | 85d48fe19f90493a88345c81352ce9899fd9e893 | /201801month/测试/redis查询.py | 6d469c409be7b5f28a55620848abd707af288b25 | [] | no_license | Hanlen520/learnPython | bebd558ec05521bf01d97782b126fb9f816c6f0e | 904fc65ee7865cdd025bd34c37f1cc19527fdf86 | refs/heads/master | 2020-04-03T10:29:38.948615 | 2018-02-22T03:40:49 | 2018-02-22T03:40:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | #!/user/bin/env python
# -*- coding:utf-8 -*-
import redis
import configparser
if __name__ == '__main__':
pool = redis.ConnectionPool(host='47.96.171.4',password='0987654321rfvujmtgbyhn', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
config = configparser.ConfigParser() | [
"[email protected]"
] | |
4656e82b4a0e1e219df2cac50b5b179e3218a254 | d7e39888dd15f6470e5b1e9e7453570c7c22f8ca | /pyfilter/inference/batch/varapprox/__init__.py | 1576b25d7693ff9001c6329aab10912016e2d6b1 | [
"MIT"
] | permissive | quinfer/pyfilter | 4673eb21d9fabb34a8a35029561c713cbe3aac5c | 90e42702c79c12b0bdfcb709fed05cb8efd90686 | refs/heads/master | 2022-12-28T22:35:53.266333 | 2020-10-17T16:09:03 | 2020-10-17T16:09:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | from .meanfield import StateMeanField, ParameterMeanField
from .base import BaseApproximation
from .lowrank import StateLowRank | [
"[email protected]"
] | |
2a156761f19d32fd35f8fb3976db4e56d8a25eac | 64fea0c51de5f8ec9a4450edc779be173daaed20 | /posts/migrations/0001_initial.py | 40b4e7380fa9e6432c06ffa34226d401c4e805a8 | [] | no_license | piyushkummaar/Unravelling-Destination | 314449e6e335e4e1c4c0606e039da262d30a4514 | 814285c50b67feb3623d3cb88b98ad3a29c2efb1 | refs/heads/master | 2022-12-09T15:22:00.346930 | 2020-06-22T06:17:27 | 2020-06-22T06:17:27 | 236,319,729 | 0 | 1 | null | 2022-12-08T05:25:56 | 2020-01-26T14:00:16 | JavaScript | UTF-8 | Python | false | false | 5,628 | py | # Generated by Django 3.0.2 on 2020-01-27 04:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_picture', models.ImageField(upload_to='')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Author',
'verbose_name_plural': 'Authors',
'db_table': 'tbl_author',
'managed': True,
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
'db_table': 'tbl_category',
'managed': True,
},
),
migrations.CreateModel(
name='ContactUs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fname', models.CharField(max_length=100)),
('lname', models.CharField(max_length=100)),
('subject', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('message', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Contact Us',
'verbose_name_plural': 'Contact Us',
'db_table': 'tbl_contactus',
'managed': True,
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('overview', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('content', tinymce.models.HTMLField()),
('thumbnail', models.ImageField(upload_to='')),
('featured', models.BooleanField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Author')),
('categories', models.ManyToManyField(to='posts.Category')),
('next_post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='next', to='posts.Post')),
('previous_post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='previous', to='posts.Post')),
],
options={
'verbose_name': 'Post',
'verbose_name_plural': 'Add Blog',
'db_table': 'tbl_post',
'managed': True,
},
),
migrations.CreateModel(
name='Signup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Newsletter',
'verbose_name_plural': 'Newsletters',
'db_table': 'tbl_newsletter',
'managed': True,
},
),
migrations.CreateModel(
name='PostView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'PostView',
'verbose_name_plural': 'Postviews',
'db_table': 'tbl_postview',
'managed': True,
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('content', models.TextField()),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Comment',
'verbose_name_plural': 'Comments',
'db_table': 'tbl_comment',
'managed': True,
},
),
]
| [
"[email protected]"
] | |
13f368d3105cf82c22ae22f3756a5f196aabc4dd | a252f9733ee0ccaf17ff90e4d9fba04f669c5ec1 | /functions.py | 731ecf3697f087e69b513019e24fa87b29fa712f | [] | no_license | firecy/20170619sd | 429ca509754516816f5a9879469bcc2440a51771 | 40c13a8a7bab19372d4ed38e082548abc6ac63d7 | refs/heads/master | 2020-03-12T10:40:14.663400 | 2018-05-17T03:09:51 | 2018-05-17T03:09:51 | 130,578,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import timeit
from datetime import datetime
import time
import numpy as np
def minmax_standardization(x, x_min, x_max):
'''
    This function performs min-max standardization of the data:
x_nor = (x-x_min)/(x_max - x_min)
'''
for i in xrange(x.shape[0]):
for j in xrange(x.shape[1]):
if x[i, j] < x_min[j]:
x[i, j] = x_min[j]
if x[i, j] > x_max[j]:
x[i, j] = x_max[j]
x_nor = (x - x_min) / (x_max - x_min)
return x_nor
def get_usv(x):
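    # center the data, build its covariance matrix and return the left singular vectors and singular values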
x -= np.mean(x, axis=0)
cov = np.dot(x.T, x) / x.shape[0]
x_u, x_s, x_v = np.linalg.svd(cov)
return x_u, x_s
def zca_whitening(x, x_u, x_s, x_mean, epsilon):
'''
    This function reduces the correlation between features and suppresses noise (ZCA whitening).
'''
x -= x_mean
xrot = np.dot(x, x_u)
xpcawhite = xrot / np.sqrt(x_s + epsilon)
xzcawhite = np.dot(xpcawhite, x_u.T)
xzcawhite += x_mean
return xzcawhite
def ts_ms(ts):
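    # parse a 'YYYY_MM_DD_HH:MM:SS:microseconds' string into a reformatted timestamp string and Unix epoch seconds (despite the variable name 'millisecond')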
fault_timestamp = str(ts)
fault_timestamp_1 = datetime.strptime(fault_timestamp,'%Y_%m_%d_%H:%M:%S:%f')
fault_timestamp_2 = fault_timestamp_1.strftime('%Y-%m-%d %H:%M:%S:%f')
millisecond = int(time.mktime(fault_timestamp_1.timetuple()))
return fault_timestamp_2, millisecond
| [
"[email protected]"
] | |
80e87f0e8fa81384b648ddc15570d1b712756a1b | 68be8655542d5a02ef5836c32a309026d4389519 | /data-structures/breadth_first/test_breadth_first_graph.py | 2baee794ceebe06052d78043853ead9201bdbdae | [
"MIT"
] | permissive | jamesbond007dj/py-data-structures-and-algorithms | e51e7c6d1a3da667cfb666a26cddcdb1015a92a0 | ce66faa99f7c7e9c0f9d248a56d78c9ebf069e25 | refs/heads/master | 2020-09-23T17:06:25.384038 | 2020-02-04T06:38:57 | 2020-02-04T06:38:57 | 225,545,867 | 0 | 0 | MIT | 2020-02-04T06:37:18 | 2019-12-03T06:19:36 | Python | UTF-8 | Python | false | false | 1,429 | py | import pytest
from breadth_first_graph import Graph , Node_
def test_no_edges():
graph = Graph()
naboo = graph.add_node('naboo')
assert graph.breadth_first(naboo) == {naboo}
@pytest.mark.skip('pending')
def test_flight_none(test_graph):
graph = test_graph
set_list = ['naboo', 'pandora', 'earth']
assert graph.breadth_first('earth') == 'earth is not in graph'
@pytest.mark.skip('pending')
def test_breadth_first():
graph = Graph()
naboo = graph.add_node('naboo')
pandora = graph.add_node('pandora')
narnia = graph.add_node('narnia')
oregon = graph.add_node('oregon')
pluto = graph.add_node('pluto')
saturn = graph.add_node('saturn')
graph.add_edge(naboo, pandora, 150)
graph.add_edge(pandora, naboo, 200)
graph.add_edge(pandora, narnia, 350)
graph.add_edge(pandora, saturn, 360)
graph.add_edge(narnia, pandora, 370)
graph.add_edge(narnia, saturn, 500)
graph.add_edge(narnia, oregon, 550)
graph.add_edge(saturn, pandora, 600)
graph.add_edge(saturn, narnia, 650)
graph.add_edge(saturn, oregon, 700)
graph.add_edge(saturn, pluto, 710)
graph.add_edge(oregon, narnia, 730)
graph.add_edge(oregon, saturn, 740)
graph.add_edge(oregon, pluto, 750)
graph.add_edge(pluto, saturn, 760)
graph.add_edge(pluto, oregon, 800)
assert graph.breadth_first(naboo) == {naboo, oregon, pluto, narnia, saturn, pandora}
| [
"[email protected]"
] | |
3e6fde40d177c25c7745a280092164dfd9e66956 | 9e31f25fb3245277e13857fb68044df2d8876ec5 | /myPythonRobot10.py | 222a1a58a2b2c68befcc0df6afe760f5f9bb8c3d | [] | no_license | Wirevel/Python-Harjoitukset | 8d269f29127b71703cc50c50aeaaeb1a6283395f | 25c5bc1c9bf4f38bc47d6bec1d642beadf9f698b | refs/heads/main | 2023-05-30T23:18:43.714735 | 2021-07-02T07:03:11 | 2021-07-02T07:03:11 | 382,258,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py |
def iloinen():
print("MyPythonRobot on iloinen")
print("o o")
print(" #")
print("( )")
print(" -/")
totinen()
def totinen():
print("MyPythonRobot on totinen")
print("o o")
print(" #")
print("---")
viksu()
def viksu():
print("MyPythonRobot on viksu")
print("o o")
print(" #")
print("~ ~")
print(" ~~~")
iloinen()
| [
"[email protected]"
] | |
ed3aa68698a39b7c08f2ef8cab0ba7db94739f9b | 22094dd1ce0117a5f2ddf2407e4f935ff3af3f49 | /ucert-env/Scripts/explode.py | 88bcfb124a5e8b97cbd6625ae221413e1c3d4e09 | [] | no_license | dylreeves/UserRecert | f9c336b736b3a0bd684fbecc08ee7c89c35ff57b | 5f3fed72b3f867412fae89ece7ca507b5549a59b | refs/heads/master | 2022-11-29T21:10:56.021314 | 2016-06-12T13:37:37 | 2016-06-12T13:37:37 | 60,696,146 | 0 | 1 | null | 2022-11-18T23:02:26 | 2016-06-08T12:28:47 | Python | UTF-8 | Python | false | false | 2,469 | py | #!d:\proj\userrecert\ucert-env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
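    # Parses a frame-range spec such as "1-10,5,-15"; indexing the instance with a frame number returns 1 if that frame falls inside any of the ranges, else 0.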
def __init__(self, interval="0"):
self.setinterval(interval)
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
def __getitem__(self, index):
for hi, lo in self.hilo:
if hi >= index >= lo:
return 1
return 0
# --------------------------------------------------------------------
# main program
html = 0
if sys.argv[1:2] == ["-h"]:
html = 1
del sys.argv[1]
if not sys.argv[2:]:
print()
print("Syntax: python explode.py infile template [range]")
print()
print("The template argument is used to construct the names of the")
print("individual frame files. The frames are numbered file001.ext,")
print("file002.ext, etc. You can insert %d to control the placement")
print("and syntax of the frame number.")
print()
print("The optional range argument specifies which frames to extract.")
print("You can give one or more ranges like 1-10, 5, -15 etc. If")
print("omitted, all frames are extracted.")
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))
try:
# check if outfile contains a placeholder
outfile % 1
except TypeError:
file, ext = os.path.splitext(outfile)
outfile = file + "%03d" + ext
ix = 1
im = Image.open(infile)
if html:
file, ext = os.path.splitext(outfile)
html = open(file+".html", "w")
html.write("<html>\n<body>\n")
while True:
if frames[ix]:
im.save(outfile % ix)
print(outfile % ix)
if html:
html.write("<img src='%s'><br>\n" % outfile % ix)
try:
im.seek(ix)
except EOFError:
break
ix += 1
if html:
html.write("</body>\n</html>\n")
| [
"[email protected]"
] | |
85b7b76fb1a314e685b23246704e250fa7f17211 | c6c14b995f42dc1f6ef3670610c4b687ff62d67c | /blogmanager/wsgi.py | ce4e0d0baed40c418b28a16ff6e0290d5386be31 | [] | no_license | attacheaka/learn-django-api-rest | 1858eb694123650416e1fc597d9cbdee001cf404 | 9d6c8d570e84ad4598001b2f21b9a3fb1d84b5d3 | refs/heads/master | 2023-03-06T05:46:08.585859 | 2021-02-18T14:31:37 | 2021-02-18T14:31:37 | 340,072,685 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for blogmanager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blogmanager.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
c4abd56378477047360154552ce5d4a59c297a33 | 4181653a63b6fc3d1a8b8b3739b5a5fc62cd3491 | /fava/core/watcher.py | b84d0b9ad3738650e2d543358af5377316bc02ed | [
"MIT"
] | permissive | rychipman/fava | aa31a87353a431f123ffbe4098b6426e8ca28ad9 | 91398610d1903a301c7c1e0c682df3176f4f4688 | refs/heads/master | 2020-09-06T21:50:08.311461 | 2019-11-08T14:39:44 | 2019-11-08T14:40:05 | 220,565,147 | 0 | 0 | MIT | 2019-11-08T23:44:50 | 2019-11-08T23:44:49 | null | UTF-8 | Python | false | false | 1,432 | py | """A simple file and folder watcher."""
import os
class Watcher:
"""A simple file and folder watcher.
For folders, only checks mtime of the folder and all subdirectories.
So a file change won't be noticed, but only new/deleted files.
"""
__slots__ = ["_files", "_folders", "_last_checked"]
def __init__(self):
self._files = []
self._folders = []
self._last_checked = 0
def update(self, files, folders):
"""Update the folders/files to watch.
Args:
files: A list of file paths.
folders: A list of paths to folders.
"""
self._files = list(files)
self._folders = list(folders)
self.check()
def check(self):
"""Check for changes.
Returns:
`True` if there was a file change in one of the files or folders,
`False` otherwise.
"""
latest_mtime = 0
for path in self._files:
mtime = os.stat(path).st_mtime_ns
if mtime > latest_mtime:
latest_mtime = mtime
for path in self._folders:
for dirpath, _, _ in os.walk(path):
mtime = os.stat(dirpath).st_mtime_ns
if mtime > latest_mtime:
latest_mtime = mtime
changed = bool(latest_mtime != self._last_checked)
self._last_checked = latest_mtime
return changed
| [
"[email protected]"
] | |
a5c1629ad13c1ecceaa8c56c94b6979ffcbaefa6 | 62b983f70dbaf744dcf93679960cee319beabbf3 | /backend/estoque/migrations/0003_alter_produto_options.py | 23e75ae545fc0f9c5af424dcb47ad1a6616f47da | [] | no_license | gelira/ecommerce | 6ac7aaec59a5034a87bb4a1b57f3c6d592e2ac93 | 784c02151296a986bcf5eea0639b3e0b7669ff28 | refs/heads/master | 2023-07-16T09:15:24.714381 | 2021-08-29T17:51:06 | 2021-08-29T17:51:06 | 391,683,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | # Generated by Django 3.2.5 on 2021-08-02 03:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('estoque', '0002_alter_loja_nome_url'),
]
operations = [
migrations.AlterModelOptions(
name='produto',
options={'ordering': ['nome']},
),
]
| [
"[email protected]"
] | |
7ba769afcde2364ea3c84f384398055ee839e430 | cb4a9b53eff6d8a0db984c0e5233416bcb54b969 | /src/python/codebay/l2tpadmin/vpneaseadmin.py | a03e3645dc3fbbf319439c35b560844e4cf0c12f | [
"WTFPL"
] | permissive | Cloudxtreme/vpnease-l2tp | d6f2f8d6480398494ad2eb9e0d414efe6696dc9d | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | refs/heads/master | 2021-05-29T16:17:02.563556 | 2013-09-05T09:44:37 | 2013-09-05T09:44:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,710 | py | """Swiss army knife VPNease administration helper.
Contains functionality required by all server-side elements: product web
server, database server, management server, DNS server, and monitoring
server. The intention is to collect "script crud" here, and use it by
invoking the tool from crontab, init-scripts, etc.
"""
import os, sys, textwrap
def usage():
print textwrap.dedent("""
Usage:
vpneaseadmin <category> <command> <parameters>
Commands:
[product web]
vpneaseadmin pw start
vpneaseadmin pw stop
[database]
vpneaseadmin db start
vpneaseadmin db stop
vpneaseadmin db create
vpneaseadmin db delete
vpneaseadmin db backup
vpneaseadmin db restore
vpneaseadmin db list-licenses
vpneaseadmin db list-licensekeys
vpneaseadmin db test-import-legacy-licenses
vpneaseadmin db test-license-fetch-loop
[management server]
vpneaseadmin ms start
vpneaseadmin ms stop
vpneaseadmin ms demoinfo
vpneaseadmin ms stats
vpneaseadmin ms backup
[dns server]
vpneaseadmin dns start
vpneaseadmin dns stop
vpneaseadmin dns update
[monitoring server]
vpneaseadmin mon check
[misc]
vpneaseadmin misc create-management-server-keypair
vpneaseadmin misc create-random-license-key
""")
def main():
try:
print >> sys.stderr, 'vpnease-admin...'
cat = sys.argv[1]
cmd = sys.argv[2]
args = sys.argv[3:]
print >> sys.stderr, 'cat: %s, cmd: %s, args: %s' % (cat, cmd, args)
if cat == 'pw':
from codebay.l2tpadmin import productwebserver as pw
if cmd == 'start':
pw.start(args)
elif cmd == 'stop':
pw.stop(args)
else:
raise Exception('unknown command %s for category %s' % (cmd, cat))
elif cat == 'db':
from codebay.l2tpadmin import databaseserver as db
if cmd == 'start':
db.start(args)
elif cmd == 'stop':
db.stop(args)
elif cmd == 'create':
db.create(args)
elif cmd == 'delete':
db.delete(args)
elif cmd == 'backup':
db.backup(args)
elif cmd == 'restore':
db.restore(args)
elif cmd == 'list-licenses':
db.list_licenses(args)
elif cmd == 'list-licensekeys':
db.list_licensekeys(args)
elif cmd == 'test-import-legacy-licenses':
db.test_import_legacy_licenses(args)
elif cmd == 'test-license-fetch-loop':
db.test_license_fetch_loop(args)
else:
raise Exception('unknown command %s for category %s' % (cmd, cat))
elif cat == 'ms':
from codebay.l2tpadmin import managementserver as ms
if cmd == 'start':
ms.start(args)
elif cmd == 'stop':
ms.stop(args)
elif cmd == 'demoinfo':
print ms.get_demo_license_info()
elif cmd == 'backup':
backup_file = ms.write_backup_file()
print 'Backup file written to: %s' % backup_file
elif cmd == 'stats':
stats = ms.get_stats()
print stats
else:
raise Exception('unknown command %s for category %s' % (cmd, cat))
elif cat == 'dns':
from codebay.l2tpadmin import dnsserver as dns
if cmd == 'start':
dns.start(args)
elif cmd == 'stop':
dns.stop(args)
else:
raise Exception('unknown command %s for category %s' % (cmd, cat))
elif cat == 'mon':
from codebay.l2tpadmin import monitoringserver as mon
if cmd == 'check':
mon.MonitoringServer().check(args)
else:
raise Exception('unknown command %s for category %s' % (cmd, cat))
elif cat == 'misc':
if cmd == 'create-management-server-keypair':
import os
from codebay.l2tpserver import helpers
if not os.path.exists('vpnease-ca-private-aes256.pem'):
raise Exception('Must be in the directory with VPNease CA files')
out_privkey = 'management-server-private.pem'
out_cert = 'management-server-certificate.pem'
print 'Creating management server keypair to files: %s, %s' % (out_privkey, out_cert)
helpers.generate_ca_signed_certificate('vpnease-ca-private-aes256.pem',
'vpnease-ca-certificate.pem',
'vpnease-ca-serialfile.txt',
out_privkey,
out_cert,
nbits=1024, common_name='VPNease Management Server',
organization='VPNease')
elif cmd == 'create-random-license-key':
from codebay.common import licensekey
print licensekey.create_random_license()
else:
raise Exception('unknown command %s for category %s' % (cmd, cat))
else:
raise Exception('unknown category %s' % cat)
except:
usage()
raise
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4010dcd01567b77fc39425e40408ee523937c632 | adbf786e80748a912e53d8bf21ecd538256a58c0 | /venv1/bin/wheel | 314edb72f8cea54b23fc8338c1e7e1e1e859ddb6 | [] | no_license | mengluGuo/FoodOrderSystem | 7cb965540bec06dbc2f8e858a0df2914465d3ec7 | 5014b935a181a88d4b521426a69b5dcdb917bfeb | refs/heads/master | 2021-04-27T10:16:18.258660 | 2018-03-20T11:46:45 | 2018-03-20T11:46:45 | 122,533,171 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/Users/lulu/git/FoodOrderSystem/venv1/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
37f4ea466ad73a0e87c966932f73d3075b721125 | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/modules/oauth/provider.py | 8fd3d5a2a89a42ba246d3b5cab32aa35ac56cb82 | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,862 | py |
from __future__ import unicode_literals
from datetime import datetime, timedelta
from uuid import UUID
from flask import after_this_request, g, session
from oauthlib.oauth2 import FatalClientError, InvalidClientIdError
from fossir.core.db import db
from fossir.modules.oauth import logger, oauth
from fossir.modules.oauth.models.applications import OAuthApplication
from fossir.modules.oauth.models.tokens import OAuthGrant, OAuthToken
from fossir.util.date_time import now_utc
class DisabledClientIdError(FatalClientError):
error = 'application_disabled_by_admin'
@oauth.clientgetter
def load_client(client_id):
try:
UUID(hex=client_id)
except ValueError:
raise InvalidClientIdError
app = OAuthApplication.find_first(client_id=client_id)
if not app.is_enabled:
raise DisabledClientIdError
return app
@oauth.grantgetter
def load_grant(client_id, code): # pragma: no cover
return OAuthGrant.get(client_id, code)
@oauth.grantsetter
def save_grant(client_id, code, request, *args, **kwargs):
expires = datetime.utcnow() + timedelta(seconds=120)
grant = OAuthGrant(client_id=client_id, code=code['code'], redirect_uri=request.redirect_uri,
user=session.user, scopes=request.scopes, expires=expires)
grant.save()
return grant
@oauth.tokengetter
def load_token(access_token, refresh_token=None):
if not access_token:
return None
# ugly hack so we can know in other places that we received a token
# e.g. to show an error if there was an invalid token specified but
# not if there was no token at all
g.received_oauth_token = True
try:
UUID(hex=access_token)
except ValueError:
# malformed oauth token
return None
token = OAuthToken.find(access_token=access_token).options(db.joinedload(OAuthToken.application)).first()
if not token or not token.application.is_enabled:
return None
token_id = token.id # avoid DetachedInstanceError in the callback
@after_this_request
def _update_last_use(response):
with db.tmp_session() as sess:
# do not modify `token` directly, it's attached to a different session!
sess.query(OAuthToken).filter_by(id=token_id).update({OAuthToken.last_used_dt: now_utc()})
sess.commit()
return response
return token
@oauth.tokensetter
def save_token(token_data, request, *args, **kwargs):
# For the implicit flow
# Check issue: https://github.com/lepture/flask-oauthlib/issues/209
if request.grant_type == 'authorization_code':
user = request.user
elif request.grant_type is None: # implicit flow
user = session.user
else:
raise ValueError('Invalid grant_type')
requested_scopes = set(token_data['scope'].split())
token = OAuthToken.find_first(OAuthApplication.client_id == request.client.client_id,
OAuthToken.user == user,
_join=OAuthApplication)
if token is None:
application = OAuthApplication.find_one(client_id=request.client.client_id)
token = OAuthToken(application=application, user=user)
db.session.add(token)
token.access_token = token_data['access_token']
token.scopes = requested_scopes
elif requested_scopes - token.scopes:
logger.info('Added scopes to %s: %s', token, requested_scopes - token.scopes)
# use the new access_token when extending scopes
token.access_token = token_data['access_token']
token.scopes |= requested_scopes
else:
token_data['access_token'] = token.access_token
token_data.pop('refresh_token', None) # we don't support refresh tokens so far
token_data.pop('expires_in', None) # our tokens currently do not expire
return token
| [
"[email protected]"
] | |
6c8b2915cd322fd87501fbabdb757f794131f79c | d05f8b20a6c4c3cb6779d472c02da672573dfbf8 | /app/getter/parsers/Novaya.py | 66931e2f0a9a1ddedf076d3680eae3a7cf981f5a | [
"MIT"
] | permissive | sweetSTEAM/NewsSummarizer | af992ecc9d92c17bfc0536bcd5c277109f99bc04 | b179e2414f5bf7f9c0c79f10bc4611e5385b3a96 | refs/heads/master | 2022-12-13T09:04:49.666297 | 2018-05-24T13:20:30 | 2018-05-24T13:20:30 | 133,338,299 | 1 | 0 | MIT | 2022-12-08T02:05:27 | 2018-05-14T09:37:54 | Python | UTF-8 | Python | false | false | 2,439 | py | import requests
import datetime
import re
from bs4 import BeautifulSoup
from .BaseParser import BaseParser
import time
from multiprocessing import cpu_count
class Novaya(BaseParser):
"""docstring for Novaya"""
def __init__(self, **kwargs):
super(Novaya, self).__init__(
id='NOVAYA',
root_url='https://www.novayagazeta.ru/news/',
api_url='https://content.novayagazeta.ru/news/',
page_type='json', **kwargs)
self.news_per_page = 100
self.offset = 0
def _get_news_list(self, content):
""" Getting list of news from page content """
return sorted(content['items'],
key=lambda x: x['dtime'], reverse=True)
def _get_news_params_in_page(self, news):
news_url = news['code']
news_date = self._str_to_time(news['dtime'])
try:
topic = news['rubric']['code']
except Exception as e:
topic = None
title = news['title']
popularity = news['views_count']
return news_url, news_date, topic, title, popularity
def _page_url(self):
# Example: https://content.novayagazeta.ru/news?offset=0&limit=100
return self.api_url + '?offset=%d&limit=%d' % (
self.offset, self.news_per_page)
def _next_page_url(self):
self.offset += self.news_per_page
return self._page_url()
def _parse_news(self, news_params):
""" Getting full news params by direct url """
url = self.root_url + news_params[0]
json_ = self._get_content(self.api_url + news_params[0], 'json')
date = news_params[1]
topic = news_params[2]
title = news_params[3]
text = BeautifulSoup(json_['body'], 'lxml')
for div in text.find_all('div'):
div.decompose()
text = text.get_text()
popularity = news_params[4]
try:
tag = ','.join(list(
map(lambda x: x['title'], json_['tags'])))
except Exception:
tag = None
news_out = {'title': title, 'url': url, 'text': text,
'topic': topic, 'date': date, 'other': {'popularity': popularity, 'tag': tag}}
return news_out
def _str_to_time(self, time_str):
# 2017-01-31T18:59:00.000+03:00
return datetime.datetime.strptime(time_str[:19],
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=self.TZ).timestamp()
| [
"[email protected]"
] | |
9e0a698d18f48d080382cd0e0b7cbf02d57bd1b4 | d9cbf5beb4d81d4398fd4eea3e18f39bd9fb00a9 | /api/views.py | 154e947ae12efb8e0cd77bbd3a3f56872eedebdc | [] | no_license | SETTER2000/yamdb_final | cf6245089485449cb9abaf44d858bc01577dfde8 | 253001f693b4ec86006c41cc610a56c1e824edb5 | refs/heads/master | 2023-08-03T03:48:52.095137 | 2021-09-17T08:41:03 | 2021-09-17T08:41:03 | 406,097,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,171 | py | from functools import partial
from django.core.mail import send_mail
from django.shortcuts import get_object_or_404
from django.utils.crypto import get_random_string
from rest_framework import filters, permissions, status, viewsets
from rest_framework.decorators import action, api_view
from rest_framework.exceptions import ParseError
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from api_yamdb.settings import DEFAULT_FROM_EMAIL, ROLES_PERMISSIONS
from .filters import TitleFilter
from .mixin import CreateListDestroyModelMixinViewSet
from .models import Category, Comment, Genre, Review, Title, User
from .permissions import IsAuthorOrReadOnly, PermissonForRole
from .serializers import (CategorySerializer, CommentSerializer,
GenreSerializer, ReviewSerializer, TitleSerializer,
UserSerializer)
class UserModelViewSet(viewsets.ModelViewSet):
"""Custщm User model with custom action."""
lookup_field = "username"
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (
partial(PermissonForRole, ROLES_PERMISSIONS.get("Users")),
)
@action(
methods=["PATCH", "GET"],
permission_classes=[permissions.IsAuthenticated],
detail=False,
url_path="me",
)
def user_me(self, request) -> Response:
"""Пользовательский URL-адрес для редактирования своего профиля."""
if request.method == "GET":
serializer = self.get_serializer(request.user)
return Response(serializer.data)
serializer = self.get_serializer(
request.user, data=request.data, partial=True
)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
class TitleModelViewSet(viewsets.ModelViewSet):
queryset = Title.objects.all()
serializer_class = TitleSerializer
permission_classes = (
partial(PermissonForRole, ROLES_PERMISSIONS.get("Titles")),
)
filterset_class = TitleFilter
def perform_create(self, serializer):
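        # resolve the submitted category slug and genre slugs into model instances before saving the title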
slugs_genre = self.request.POST.getlist("genre")
slug_category = self.request.data["category"]
category = get_object_or_404(Category, slug=slug_category)
title = serializer.save(category_id=category.id)
for slug in slugs_genre:
genre = get_object_or_404(Genre, slug=slug)
title.genre.add(genre)
def perform_update(self, serializer):
if "genre" in self.request.data:
slug_str = self.request.data["genre"]
slugs = [x.strip() for x in slug_str.split(",")]
genre = Genre.objects.none()
for i in slugs:
genre_a = Genre.objects.filter(slug=i)
genre = genre.union(genre_a)
genre_title = self.get_object().genre.all()
genre = genre_title.union(genre)
else:
genre = self.get_object().genre.all()
if "category" in self.request.data:
category = get_object_or_404(
Category, slug=self.request.data["category"]
)
else:
slug = self.get_object().category.slug
category = get_object_or_404(Category, slug=slug)
serializer.save(
genre=genre,
category_id=category.id,
)
class CategoryModelViewSet(CreateListDestroyModelMixinViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
permission_classes = (
partial(PermissonForRole, ROLES_PERMISSIONS.get("Categories")),
)
filter_backends = (filters.SearchFilter,)
search_fields = ("name",)
lookup_field = "slug"
def perform_create(self, serializer):
serializer.save(
name=self.request.data["name"], slug=self.request.data["slug"]
)
def perform_destroy(self, serializer):
serializer = get_object_or_404(Category, slug=self.kwargs.get("slug"))
serializer.delete()
class GenreModelViewSet(CreateListDestroyModelMixinViewSet):
queryset = Genre.objects.all()
serializer_class = GenreSerializer
permission_classes = (
partial(PermissonForRole, ROLES_PERMISSIONS.get("Genres")),
)
filter_backends = (filters.SearchFilter,)
search_fields = ("name",)
lookup_field = "slug"
def perform_create(self, serializer):
serializer.save(
name=self.request.data["name"], slug=self.request.data["slug"]
)
def perform_destroy(self, serializer):
serializer = get_object_or_404(Genre, slug=self.kwargs.get("slug"))
serializer.delete()
class ReviewModelViewSet(viewsets.ModelViewSet):
serializer_class = ReviewSerializer
permission_classes = (
(IsAuthenticatedOrReadOnly & IsAuthorOrReadOnly)
| partial(PermissonForRole, ROLES_PERMISSIONS.get("Reviews")),
)
def perform_create(self, serializer):
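        # a user may leave only one review per title; duplicates are rejected with a ParseError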
title = get_object_or_404(Title, pk=self.kwargs["title_id"])
user = User.objects.get(username=self.request.user)
if user is None:
raise ParseError("Неверный запрос!")
review = Review.objects.filter(
title=self.kwargs["title_id"], author=self.request.user.id
)
if review.exists():
raise ParseError(detail="Ваш отзыв уже существует!")
serializer.save(author=user, title=title)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get_queryset(self):
return Review.objects.filter(title_id=self.kwargs["title_id"])
class CommentModelViewSet(viewsets.ModelViewSet):
serializer_class = CommentSerializer
permission_classes = (
(IsAuthenticatedOrReadOnly & IsAuthorOrReadOnly)
| partial(PermissonForRole, ROLES_PERMISSIONS.get("Reviews")),
)
def perform_create(self, serializer):
review = get_object_or_404(Review, pk=self.kwargs["review_id"])
title = get_object_or_404(Title, pk=self.kwargs["title_id"])
serializer.save(author=self.request.user, review=review, title=title)
return Response(serializer.data, status=status.HTTP_200_OK)
def get_queryset(self):
return Comment.objects.filter(review_id=self.kwargs["review_id"])
@api_view(["POST"])
def email_auth(request):
"""Check email and send to it confirmation code for token auth."""
user = get_object_or_404(User, email=request.data["email"])
confirmation_code = get_random_string()
user.confirmation_code = confirmation_code
user.save()
send_mail(
subject="Код для генерации токена аутентификации YAMDB",
message=str(confirmation_code),
from_email=DEFAULT_FROM_EMAIL,
recipient_list=(request.data["email"],),
)
return Response(
data="Письмо с кодом для аутентификации",
status=status.HTTP_201_CREATED,
)
| [
"[email protected]"
] | |
57202db9f42478a5e54092ec297297a3c0178909 | baf2329e02c590467ff76ffbe50c7339cf5e049e | /files/api/views.py | c2f599c1774035507cd05fc6bb9e767416c5d5b1 | [] | no_license | jermained876/newphotobackend | d40a1c163408b192229abf293aea966dda59c31d | 336b20558536da0b88a97b1a8db7070d41244503 | refs/heads/main | 2023-06-12T23:40:02.390697 | 2021-07-03T15:50:30 | 2021-07-03T15:50:30 | 382,640,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,850 | py | from rest_framework.generics import ListAPIView, CreateAPIView, DestroyAPIView, RetrieveAPIView, UpdateAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.views import APIView
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from ..models import Files
from filetype.models import Filetype
from .serializer import FilesSerializer
from album.models import Album
from django.core.exceptions import ObjectDoesNotExist
from albumaccess.models import Albumaccess
from album.api.serializer import AlbumSerializers
from django.contrib.auth.models import Group
def changeFile(id):
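    # toggle the file's type between the 'Hidden' and 'Open' Filetype rows and return the serialized file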
try:
file = Files.objects.get(id=id)
except ObjectDoesNotExist:
raise ValidationError('No file found')
try:
filetypehidden = Filetype.objects.get(name='Hidden')
except ObjectDoesNotExist:
raise ValidationError('No file Type Hidden found')
try:
filetypeopen = Filetype.objects.get(name='Open')
except ObjectDoesNotExist:
raise ValidationError('No file Type Open found')
if file.type == filetypehidden:
file.type = filetypeopen
file.save()
elif file.type == filetypeopen:
file.type = filetypehidden
file.save()
serial = FilesSerializer(file,many=False)
return Response(serial.data)
class ChangeFileAPIView (APIView):
permission_classes = [IsAuthenticated]
def get(self, request, id):
try:
currentAlbum = Album.objects.get(album_files__id=id)
except ObjectDoesNotExist:
raise ValidationError('No Album')
user = request.user
if user.is_superuser:
return changeFile(id)
elif user.groups.filter(name='Customer').exists():
return changeFile(id)
else:
return Response(False)
def getALLFILES(slug):
file = Files.objects.filter(album__slug=slug)
serial = FilesSerializer(file, many=True)
return Response(serial.data)
def getOPENALLFILES(slug):
file = Files.objects.filter(album__slug=slug).filter(type__name='Open')
serial = FilesSerializer(file, many=True)
return Response(serial.data)
class AlbumFilesAPIView (APIView):
permission_classes = [AllowAny]
def get(self, request, slug):
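        # superusers and customers with album access see every file; guests with access and anyone viewing a public album only see files whose type is 'Open'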
try:
currentAlbum = Album.objects.get(slug=slug)
except ObjectDoesNotExist:
raise ValidationError('No Album')
try:
pub_Album = Albumaccess.objects.get(name='Public')
except ObjectDoesNotExist:
raise ValidationError('No Album Access')
try:
pri_Album = Albumaccess.objects.get(name='Private')
except ObjectDoesNotExist:
raise ValidationError('No Album Access')
if request.user.is_authenticated:
user = request.user
if user.is_superuser:
return getALLFILES(slug)
else:
try:
accessalbum = Album.objects.get(slug=slug, albumuser_album__user=user)
if user.groups.filter(name='Customer').exists():
return getALLFILES(slug)
elif user.groups.filter(name='Guest').exists():
return getOPENALLFILES(slug)
else:
raise ValidationError('No Access')
except ObjectDoesNotExist:
if currentAlbum.access == pub_Album:
return getOPENALLFILES(slug)
else:
raise ValidationError('No Access')
else:
if currentAlbum.access == pub_Album:
return getOPENALLFILES(slug)
else:
raise ValidationError('No Access')
class FilesListAPIView(ListAPIView):
permission_classes = [AllowAny]
serializer_class = FilesSerializer
queryset = Files.objects.all()
class FilesRetrieveAPIView(RetrieveAPIView):
permission_classes = [AllowAny]
serializer_class = FilesSerializer
queryset = Files.objects.all()
lookup_field = 'asset_id'
lookup_url_kwarg = 'asset_id'
class FilesUpdateAPIView(UpdateAPIView):
permission_classes = [AllowAny]
serializer_class = FilesSerializer
queryset = Files.objects.all()
lookup_field = 'asset_id'
lookup_url_kwarg = 'asset_id'
class FilesCreateAPIVIEW(CreateAPIView):
permission_classes = [AllowAny]
serializer_class = FilesSerializer
queryset = Files.objects.all()
lookup_field = 'asset_id'
lookup_url_kwarg = 'asset_id'
class FilesDestoryAPIVIEW(DestroyAPIView):
permission_classes = [AllowAny]
serializer_class = FilesSerializer
queryset = Files.objects.all()
lookup_field = 'asset_id'
lookup_url_kwarg = 'asset_id'
| [
"[email protected]"
] | |
df49a368f3b72c4c1b65114df321fdaa74c9e3f0 | f0fe0154e6cf6dba4606d0c0dba9cda9e2de2a50 | /user_auth/urls.py | e5d2646d37795ef6a034591ab31df383b9b6c2e1 | [] | no_license | Irina-Roslaya/notifier | ca84015192a5de6cf1c5e668550a68a2657a793c | 259dfe05c361d383e79840a51479c961f09f6c4a | refs/heads/master | 2023-02-08T01:00:16.248666 | 2020-11-10T16:57:15 | 2020-11-10T16:57:15 | 318,980,217 | 0 | 0 | null | 2020-12-06T07:52:52 | 2020-12-06T07:52:52 | null | UTF-8 | Python | false | false | 1,362 | py | from django.contrib.auth.views import PasswordResetConfirmView, PasswordResetView, PasswordResetCompleteView, \
PasswordResetDoneView, LoginView, LogoutView
from django.urls import path
from user_auth.views import RegistrationView
urlpatterns = [
path('login/', LoginView.as_view(template_name='user_auth/login.html'), name='login'),
path('logout/', LogoutView.as_view(template_name='user_auth/logout.html'), name='logout'),
path('registration/', RegistrationView.as_view(), name='registration'),
path('password_reset/', PasswordResetView.as_view(
template_name='user_auth/password_reset.html',
email_template_name='user_auth/email_message_for_password_change.html',
success_url='../password_reset_done/'),
name='password-reset'),
path('password_reset_done/', PasswordResetDoneView.as_view(template_name='user_auth/password_reset_done.html'),
name='password-reset-done'),
path('password_reset/<str:uidb64>/<str:token>/', PasswordResetConfirmView.as_view(
template_name='user_auth/password_reset_confirm.html',
success_url='../../../password_reset_complete/'),
name='password-reset-confirm'),
path('password_reset_complete/', PasswordResetCompleteView.as_view(
template_name='user_auth/password_reset_complete.html'), name='password-reset-complete'),
]
| [
"[email protected]"
] | |
e29cd27f548d29038584f8ba7ed25b6ffd8ea298 | 314fb2eea31aea4520465702b12fd2a7c5b4c728 | /ledm/trees/network.py | 95193f11614dcd88dacaf034c527d15c3939fc59 | [] | no_license | gundan0079/FalconCloud | dbdc1fdfddfac458c110e65969d4462672440e21 | c02bfb760c2f640754038cccd3b464c3026d11e5 | refs/heads/master | 2021-07-21T04:30:48.453106 | 2017-10-31T06:14:02 | 2017-10-31T06:14:02 | 108,953,380 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,656 | py | """
LEDM wrapper around Network related trees
"""
import logging
from . import ledm_templates
from .ledm_tree import LEDMTree
log = logging.getLogger(__name__)
class Adapters(LEDMTree):
"""
Adapters tree /IoMgmt/Adapters
"""
def __init__(self, data=ledm_templates.ADAPTERS):
super().__init__(data)
@property
def power(self):
"""
on/off settings for Power
"""
return self.get("Power")
@power.setter
def power(self, value):
expected_values = ["on", "off"]
if value not in expected_values:
raise ValueError("power incorrect settings\Expected: {}\nReceived: {}".format(expected_values, value))
self.set("Power", value)
@property
def power_level(self):
"""
        Current power level of the adapter (read-only)
"""
return self.get("PowerLevel")
class NetAppsDyn(LEDMTree):
"""
NetAppsDyn tree /DevMgmt/NetAppsDyn.xml
"""
def __init__(self, data=ledm_templates.NET_APPS_DYN):
super().__init__(data)
@property
def direct_print(self):
"""
enable/disable DirectPrint
"""
return self.get("DirectPrint")
@direct_print.setter
def direct_print(self, value):
expected_values = ["enabled", "disabled"]
if value not in expected_values:
raise ValueError("direct_print incorrect settings\Expected: {}\nReceived: {}".format(expected_values, value))
self.set("DirectPrint", value)
class NetAppsSecureDyn(LEDMTree):
"""
NetAppsDyn tree /DevMgmt/NetAppsSecureDyn.xml
"""
def __init__(self, data=ledm_templates.NET_APPS_SECURE_DYN):
super().__init__(data)
@property
def state(self):
"""
        enabled/disabled value of State
"""
return self.get("State")
@state.setter
def state(self, value):
expected_values = ["enabled", "disabled"]
if value not in expected_values:
raise ValueError("state incorrect settings\Expected: {}\nReceived: {}".format(expected_values, value))
self.set("State", value)
class Wifi(LEDMTree):
"""
Adapters Template for tree /IoMgmt/Adapters/Wifi0
"""
def __init__(self, data=ledm_templates.ADAPTERS):
super().__init__(data)
@property
def power(self):
"""
on/off settings for Power
"""
return self.get("Power")
@power.setter
def power(self, value):
expected_values = ["on", "off"]
if value not in expected_values:
raise ValueError("power incorrect settings\Expected: {}\nReceived: {}".format(expected_values, value))
self.set("Power", value)
@property
def power_level(self):
"""
        PowerLevel reported by the adapter (read-only)
"""
return self.get("PowerLevel")
class WifiNetworks(LEDMTree):
"""
    Wi-Fi networks tree (list of scanned SSIDs)
"""
def __init__(self, data):
super().__init__(data)
@property
def ssid(self):
"""
Return a list of SSIDs
"""
ssids = []
ssid_nodes = self.data.findAll("SSID")
        for ssid_node in ssid_nodes:
            ssids.append(ssid_node.text)
return ssids
class WifiProfile(LEDMTree):
"""
    Wi-Fi profile tree (built from ledm_templates.WIFI_PROFILE)
"""
def __init__(self, data=ledm_templates.WIFI_PROFILE):
super().__init__(data)
@property
def locale(self):
"""
Read/Write Locale
"""
return self.get("Locale")
@locale.setter
def locale(self, value):
self.set("Locale", value)
| [
"[email protected]"
] | |
06b3013203fc5ed569676d2b786692ba60f54286 | 7710efc124843352ea6002578ffd3358087b8236 | /gowestapp/go/datasets/red_light_camera_notices/process.py | fc2b0400a4035bff6c8143ea7e6f7b9286235e87 | [
"MIT"
] | permissive | deccico/gowest | d1a143a665544579b3006ad4cdc606db2c568402 | bbf5f6ffd9d7bd17e23586efdb339bd08ab60285 | refs/heads/master | 2021-09-08T14:51:14.437888 | 2021-09-04T02:59:09 | 2021-09-04T02:59:09 | 21,733,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,072 | py | '''
Get dictionary of {suburb => [avg number of monthly fines, avg fine value]}
'''
import csv
import os
def getRedLightFinesBySuburb():
file = open(os.path.dirname(os.path.realpath(__file__)) + '/data.csv', 'r')
isfirst = True
reader = csv.reader(file)
# Start by storing totals so we can calculate weighted average later
resultTotals = {} # dict of suburb => [list of [number, totalvalue]]
for row in reader:
if isfirst:
isfirst = False
continue
#2007/08,Apr,1,318,Red Light Camera,Not Stop At Red Light - Camera Detected - Corporation,STONEY CREEK ROAD (140) PEAKHURST WESTBOUND
number = int(row[2])
totalvalue = int(row[3])
address = row[6] #STONEY CREEK ROAD (140) PEAKHURST WESTBOUND
# Split the address - grab the suburb (after the parens, before the last word)
suburb = address[address.find(')')+2:]
# Strip 'bound' from result
if 'BOUND' in suburb and ' ' in suburb:
suburb = suburb[:suburb.rfind(' ')]
# Some suburbs names have parens
if '(Z)' in suburb:
suburb = suburb[:suburb.find('(')]
if ')' in suburb:
suburb = suburb[suburb.rfind(')')+1:]
if 'O/RAMP' in suburb:
suburb = suburb[suburb.find('O/RAMP')+7:]
suburb = ' '.join(w.capitalize() for w in suburb.split()) # title capitalise
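        # e.g. "STONEY CREEK ROAD (140) PEAKHURST WESTBOUND" -> "PEAKHURST WESTBOUND"
        # -> "PEAKHURST" (direction word stripped) -> "Peakhurst" after capitalising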
if suburb not in resultTotals:
resultTotals[suburb] = []
resultTotals[suburb].append([number, totalvalue])
# now calculate averages
results = {}
for suburb, totals in resultTotals.iteritems():
months = 0
totalfines = 0
totalfinevalue = 0
for total in totals:
months += 1
totalfines += total[0]
totalfinevalue += total[1]
numbermonthly = totalfines / months
avgvalue = totalfinevalue / totalfines
results[suburb] = [numbermonthly, avgvalue]
return results
if __name__ == '__main__':
print getRedLightFinesBySuburb() | [
"[email protected]"
] | |
0889b4206fddc5e448076e4b2c246c02df4a6227 | 04d5510b7865cc791a52c3f4feb232d7dea12928 | /flight-deals-start/data_manager.py | e08e5e9d48b795a816e0e7896a10e1055c902d9a | [] | no_license | kodchapong1295/Learning_python | 7b8341c0f38a0dad2d248adfdab464278ddc6045 | c1f816189cadd7a87758797c3e94184ef9efcb2f | refs/heads/main | 2023-06-26T08:21:09.717658 | 2021-07-24T19:09:05 | 2021-07-24T19:09:05 | 374,912,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | import requests
SHEET_ENDPOINT = "https://api.sheety.co/302424d51f43453377ceccb76743d6d4/flightDeals/prices"
class DataManager:
# This class is responsible for talking to the Google Sheet.
def __init__(self):
self.destination_data = {}
def get_destination_data(self):
response = requests.get(SHEET_ENDPOINT)
data = response.json()
self.destination_data = data["prices"]
return self.destination_data
def update_destination_codes(self):
for city in self.destination_data:
new_data = {
"price": {
"iataCode": city["iataCode"]
}
}
response = requests.put(
url=f"{SHEET_ENDPOINT}/{city['id']}",
json=new_data
)
print(response.text)
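# Example flow (sketch): the Sheety sheet behind SHEET_ENDPOINT is project-specific,
# so the exact row fields below are an assumption based on the default "prices" sheet:
#   manager = DataManager()
#   rows = manager.get_destination_data()   # one dict per sheet row
#   rows[0]["iataCode"] = "PAR"             # normally filled in by a flight search step
#   manager.update_destination_codes()      # PUTs the codes back row by row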
| [
"[email protected]"
] | |
2985a529593da7029e6868a014adb5ffd5482868 | 88e06bab1989c81a2dd649bb09b144fa7c958f89 | /ib_best_time_to_buy_and_sell_stock_3.py | 9fcb9cac7a3c97e80e917ccc8ad6581a8d4fbbbc | [] | no_license | VaibhavD143/Coding | 4499526b22ee4ef13f66c3abcea671c80a8f748a | 5de3bae8891c7d174cbc847a37c3afb00dd28f0e | refs/heads/master | 2023-08-06T21:56:44.934954 | 2021-10-09T18:31:29 | 2021-10-09T18:31:29 | 263,890,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | class Solution:
# @param A : tuple of integers
# @return an integer
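    # Example: maxProfit([3, 3, 5, 0, 0, 3, 1, 4]) == 6 -- buy at 0 / sell at 3, then
    # buy at 1 / sell at 4, using at most two non-overlapping transactions.
    # left[i+1] holds the best single-transaction profit within A[:i+1] and right[i]
    # the best within A[i:], so maximising left[i] + right[i] over all split points
    # gives the optimal two-transaction answer.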
def maxProfit(self, A):
if len(A)<2:
return 0
left = [0]*(1+len(A))
right = [0]*(1+len(A))
currMin = A[0]
for i in range(1,len(A)):
left[i+1] = max(A[i]-currMin,left[i])
currMin = min(currMin,A[i])
currMax = A[-1]
for i in range(len(A)-2,-1,-1):
right[i] = max(currMax-A[i],right[i+1])
currMax = max(currMax,A[i])
# print(left)
# print(right)
res=0
for i in range(len(A)):
res = max(res,left[i]+right[i])
return res | [
"[email protected]"
] | |
5a386e5db8c1dd224b1fd56600b56125a1bffec9 | 4a20ed85127f353746d284c97975a4ba613e2868 | /code/tflite/run_model_loopstl10.py | cbdc76cc8c38e24c2a6889d1ac3de0088a9c01e0 | [] | no_license | toelt-llc/TOELT-tfrabbit | 371a785a58185f677ba6c86581d41bdf9b8c700c | 8a0e4ea852d9e3bc1bb47729f82a1efd2d20a7ef | refs/heads/main | 2023-08-21T14:00:53.784038 | 2021-10-19T07:55:06 | 2021-10-19T07:55:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,245 | py | #!/usr/bin/env python3
import tensorflow as tf
import pandas as pd
import numpy as np
import pathlib
import pickle
import time
import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#TODO : main function
import matplotlib
matplotlib.use('Agg')
tflite_models_dir = pathlib.Path("./stl10_tflite_models/")
stl10_models_dir = pathlib.Path('./stl10_models') #Not used
## adapted to stl10
from stl10_load import read_all_images, read_labels
#TRAIN_PATH = '../../data/stl10_binary/train_X.bin'
#TRAIN_LABEL_PATH = '../../data/stl10_binary/train_y.bin'
TEST_PATH = '../../data/stl10_binary/test_X.bin'
TEST_LABEL_PATH = '../../data/stl10_binary/test_y.bin'
#train_images = read_all_images(TRAIN_PATH)
#train_labels = read_labels(TRAIN_LABEL_PATH)
test_images = read_all_images(TEST_PATH)
test_labels = read_labels(TEST_LABEL_PATH)
#train_images = train_images.astype(np.float32) / 255.0
test_images = test_images.astype(np.float32) / 255.0
#train_labels -= 1
test_labels -= 1
## NOT reducing test_set by half for the run loops :
# test_images = test_images[:4000]
# test_labels = test_labels[:4000]
def run_tflite_model(tflite_file, test_image_indices):
global test_images
# Initialize
interpreter = tf.lite.Interpreter(model_content=(tflite_file))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
predictions = np.zeros((len(test_image_indices),), dtype=int)
inv_times = []
for i, test_image_index in enumerate(test_image_indices):
test_image = test_images[test_image_index]
if input_details['dtype'] == np.uint8:
input_scale, input_zero_point = input_details["quantization"]
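            # TFLite affine quantization: real = scale * (q - zero_point); the line
            # below applies the inverse mapping so the float image lands in the
            # model's uint8 input range before invoking the interpreter.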
test_image = test_image / input_scale + input_zero_point
#test_image = test_image.astype(input_details['dtype'])
test_image = np.expand_dims(test_image, axis=0).astype(input_details["dtype"])
interpreter.set_tensor(input_details["index"], test_image)
start = time.time()
interpreter.invoke()
end = time.time()
inv_times.append(end-start)
#print(interpreter.get_tensor(output_details["index"]))
output = interpreter.get_tensor(output_details["index"])[0]
predictions[i] = output.argmax()
inv_time = sum(inv_times)/len(inv_times)
#print('whole invoke :', sum(inv_times))
return predictions, inv_time
def evaluate_model(tflite_file):
global test_images
global test_labels
test_image_indices = range(test_images.shape[0])
start = time.time()
predictions, invokes_times = run_tflite_model(tflite_file, test_image_indices)
end = time.time()
accuracy = (np.sum(test_labels== predictions) * 100) / len(test_images)
#print('Model accuracy is %.4f%% (Number of test samples=%d)' % (accuracy, len(test_images)))
print('Inference time is : ', round(end-start,2))
#print('Invoke time is :', invokes_times)
return round(end-start,2)
def disk_usage(dir):
sizes_kb = {}
print('Models sizes : ')
for _,_,filenames in os.walk(dir):
#print(filenames)
for file in sorted(filenames):
print(file, ':', os.stat(os.path.join(dir,file)).st_size/1000, 'kb')
sizes_kb[file] = os.stat(os.path.join(dir,file)).st_size/1000
return sizes_kb
## Run TFLite
tflite_models = []
for dirname, _, filenames in os.walk('./stl10_tflite_models/'):
for filename in sorted(filenames):
tflite_models.append(os.path.join(dirname, filename))
num_iter = int(sys.argv[1])
inferences = {}
for model in tflite_models:
print('Model running is : ', model)
tflite_model = open(model, "rb").read()
inferences[model]=[]
i = 0
for i in range(num_iter):
#inferences.append(evaluate_model(tflite_model))
inferences[model].append(evaluate_model(tflite_model))
i +=1
infdf = pd.DataFrame.from_dict(inferences)
print(infdf)
## Run classic TF part
cnn_model = tf.keras.models.load_model('./stl10_models/CNN_classic.h5')
ffnn_model = tf.keras.models.load_model('./stl10_models/FFNN_classic.h5')
tf_models = [cnn_model, ffnn_model]
classic_inferences = {} #{'cnn':[],'ffnn':[]}
for model in tf_models:
classic_inferences[model._name] = []
i = 0
for i in range(num_iter):
start = time.time()
loss, acc = model.evaluate(test_images, test_labels, verbose=False)
end = time.time()
classic_inferences[model._name].append(round(end-start,2))
i +=1
classic_infdf = pd.DataFrame.from_dict(classic_inferences)
print(classic_infdf)
result = pd.concat([infdf, classic_infdf], axis=1)
result = result.append(result.std(), ignore_index=True)  # keep the std-dev row across iterations
# Memory usage
litemodels_size = list(disk_usage(tflite_models_dir).values())
models_size = list(disk_usage(stl10_models_dir).values())
sizes_list = litemodels_size + models_size
# The pickle file will contain a list including the combined dataframes + the disk size
name = sys.argv[2]
data = []
data.append(result), data.append(sizes_list)
with open('RPI_inferences_stl10_'+str(num_iter)+name+'.pkl', 'wb') as f:
pickle.dump(data, f)
result.to_csv('RPI_inferences_stl10_'+str(num_iter)+name+'.csv', index=False)
| [
"[email protected]"
] | |
81e77915ca7bad22a571838755dd54d07f637b78 | 97832e4e6faeda1394b2ec6e9be8fc804d8e8f0a | /verilog-grapher/main.py | 56bd98cdeafd97a59b8624f86c75cca43a637864 | [] | no_license | santhosh-raghul/VLSI | 6fa88a884eef10773ce38761282cd94284a62875 | 443d72021bca7e969c5d17fefab080627644ac91 | refs/heads/master | 2023-01-24T00:34:16.795445 | 2020-11-18T13:54:52 | 2020-11-18T13:54:52 | 296,787,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import argparse
from verilog_to_graph import verilog_to_graph
from process_graph import process_graph
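# Example invocation (the file name below is illustrative):
#   python main.py adder.v      # renders the gate-level graph to adder.png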
def main():
parser=argparse.ArgumentParser(description="convert gate level verilog to a visualizable graph")
parser.add_argument('verilog_file',type=str,help="path to the verilog module file containing gate level design")
args=parser.parse_args()
image_file_name=args.verilog_file.split('.v')[0]+".png"
input_nodes,output_nodes,nodes,edges=verilog_to_graph(args.verilog_file)
process_graph(input_nodes,output_nodes,nodes,edges,image_file_name)
if __name__=="__main__":
main() | [
"[email protected]"
] | |
539059c3ac72ce24e927d6fd00c4be155cd5e292 | 45cb7f3ac4d4d8befdeb7a0ff1579d6556234555 | /pycon-2021/turtle.py | 57c716acbf7227ba965a7d2c0c9c3e3edef98ac1 | [
"MIT"
] | permissive | aroberge/talks | fbedacdd2c8505042c322b141378a52a1e36ea09 | 4b80ee4f33e733ecfa3c950e269f3bc1377c506a | refs/heads/master | 2021-09-26T04:40:45.876560 | 2021-09-09T23:53:53 | 2021-09-09T23:53:53 | 218,380,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | # Draw a square
import turtle as t
for i in range(4):
t.forward(100)
t.left(90)
| [
"[email protected]"
] | |
913b2953e26843a35980d3ead63eb3616a9191da | 5c61990fc1a79f389111a3e449c1fadf65fc1b8c | /wt_purchase_request_extend/models/purchase_request.py | f5a38954182061e03dd2b8c3139dc650163397cb | [] | no_license | brahim94/portnet | 3befb64009fd014b74e01151cc429a613d3d2f11 | f1120ce4806ba2fd7e26132ca918d1ce8b9ad32c | refs/heads/master | 2023-04-14T07:17:40.956207 | 2021-04-27T16:37:48 | 2021-04-27T16:37:48 | 356,211,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,049 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import Warning, UserError
_STATES = [
("draft", "Draft"),
("to_be_approve", "Approve"),
("to_approve", "Validated"),
("approved", "Qualified"),
("done", "Closed"),
("rejected", "Refusé"),
("cancelled", "Annulé")
]
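# Rough workflow implied by the buttons below (an assumption -- the draft, to_approve
# and done transitions come from the parent purchase_request module):
#   draft -> to_be_approve -> to_approve -> approved -> done,
#   with rejected / cancelled as exit states.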
class PurchaseRequest(models.Model):
_inherit = 'purchase.request'
qualifier_id = fields.Many2one("res.users", string="Qualifier", track_visibility="onchange",
domain=lambda self: [("groups_id", "in", self.env.ref("wt_purchase_request_extend.group_purchase_request_qualifer").id)])
financement_type = fields.Selection([('CHB', 'CHB'),('Sur Budget', 'Sur Budget')], string="Type de Financement")
# programme_chb = fields.Many2one("purchase.financement", string="Programme CHB")
#override
assigned_to = fields.Many2one(comodel_name="res.users", string="Responsible", track_visibility="onchange",
domain=lambda self: [("groups_id", "in", self.env.ref("wt_purchase_request_extend.group_purchase_request_responsible").id)],
default=lambda self:self.env.uid)
state = fields.Selection(selection=_STATES, string="Status", index=True, track_visibility="onchange",
required=True, copy=False, default="draft")
validator_id = fields.Many2one(comodel_name="res.users", string="Validator", track_visibility="onchange",
domain=lambda self: [("groups_id", "in", self.env.ref("purchase_request.group_purchase_request_manager").id)])
# @api.onchange('financement_type')
# def onchange_financement_type(self):
# if self.financement_type != 'CHB':
# self.programme_chb = ''
def button_to_approve(self):
if not self.assigned_to.employee_ids:
raise UserError(_('There is no employee found for responsible user.'))
if not self.assigned_to.employee_ids[0].department_id:
raise UserError(_('There is no employee department found for responsible user.'))
if not self.assigned_to.employee_ids[0].department_id == self.department_id:
raise UserError(_('There is no matching employee department found for responsible user.'))
if not self.validator_id:
            raise UserError(_('Please assign a Validator before processing.'))
return super(PurchaseRequest, self).button_to_approve()
@api.onchange('department_id')
def onchange_department(self):
if self.department_id and self.department_id.manager_id:
self.assigned_to = self.department_id.manager_id.user_id.id if self.department_id.manager_id.user_id else False
def button_to_be_approve(self):
        if not self.viewd_approved:
            raise UserError(_('The request must be viewed and approved by the responsible user first.'))
return self.write({"state": "to_be_approve"})
def button_approved(self):
if not self.qualifier_id:
            raise UserError(_('Please assign a Qualifier before processing.'))
return self.write({"state": "approved"})
def button_rejected(self):
return self.write({"state": "rejected"})
def button_cancelled(self):
if not self.env.user.has_group('wt_purchase_request_extend.group_purchase_request_responsible'):
self.mapped("line_ids").do_cancel()
return self.write({"state": "cancelled"})
@api.depends("state", "line_ids.product_qty", "line_ids.cancelled")
def _compute_to_approve_allowed(self):
for rec in self:
rec.to_approve_allowed = rec.state == "to_be_approve" and any(
[not line.cancelled and line.product_qty for line in rec.line_ids]
)
class PurchaseFinancement(models.Model):
_name = 'purchase.financement'
_description = 'Purchase Financement'
name = fields.Char(string="Name", required=True)
class PurchasePrevisionnel(models.Model):
_name = 'purchase.previsionnel'
_description = 'Purchase Previsionnel'
name = fields.Char(string="Name", required=True)
| [
"[email protected]"
] | |
417daf1c747f5fc931258ed46130720d72852cf6 | 5e47584222df03115a672aa50b104e5637cae0a6 | /asr_vae/models/networks/variational/variational_lstm.py | c317ebe2e7f969944cd16ead068ec91cbf21223c | [] | no_license | bstriner/ctc-process | 8444689ab462696d26ec746693d7f3721f5a6138 | 15b2eeddda0ece40ca31242e37e24a0fc4a0ce53 | refs/heads/master | 2023-02-23T20:02:42.012255 | 2021-01-21T00:16:24 | 2021-01-21T00:16:24 | 331,465,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,825 | py | import numpy as np
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.layers.cudnn_rnn import CUDNN_GRU, CUDNN_GRU_PARAMS_PER_LAYER, \
CUDNN_INPUT_LINEAR_MODE, CUDNN_LSTM, CUDNN_LSTM_PARAMS_PER_LAYER, CUDNN_RNN_BIDIRECTION, CUDNN_RNN_RELU, \
CUDNN_RNN_RELU_PARAMS_PER_LAYER, CUDNN_RNN_TANH, CUDNN_RNN_TANH_PARAMS_PER_LAYER, CUDNN_RNN_UNIDIRECTION
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import _cudnn_rnn, cudnn_rnn_canonical_to_opaque_params
from .variational_variable import VariationalParams, get_variable
def state_shape(num_layers, num_dirs, num_units, batch_size):
"""Shape of Cudnn LSTM states.
Shape is a 2-element tuple. Each is
[num_layers * num_dirs, batch_size, num_units]
Args:
batch_size: an int
Returns:
a tuple of python arrays.
"""
return ([num_layers * num_dirs, batch_size, num_units],
[num_layers * num_dirs, batch_size, num_units])
def get_num_params_per_layer(rnn_mode=CUDNN_LSTM):
if rnn_mode == CUDNN_LSTM:
return CUDNN_LSTM_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_GRU:
return CUDNN_GRU_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_RNN_TANH:
return CUDNN_RNN_TANH_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_RNN_RELU:
return CUDNN_RNN_RELU_PARAMS_PER_LAYER
else:
raise ValueError()
def canonical_bias_shape_fn(direction, num_outputs, rnn_mode=CUDNN_LSTM):
num_dirs = 1 if direction == CUDNN_RNN_UNIDIRECTION else 2
num_params_per_layer = get_num_params_per_layer(rnn_mode=rnn_mode)
return [[num_outputs]] * num_dirs * num_params_per_layer
def canonical_bias_shapes_fn(direction, num_outputs, layers, rnn_mode=CUDNN_LSTM):
return canonical_bias_shape_fn(direction=direction, num_outputs=num_outputs, rnn_mode=rnn_mode) * layers
def canonical_weight_shape_fn(input_size, direction, layer, num_outputs, rnn_mode=CUDNN_LSTM):
num_params_per_layer = get_num_params_per_layer(rnn_mode=rnn_mode)
num_gates = num_params_per_layer // 2
is_bidi = direction == CUDNN_RNN_BIDIRECTION
if layer == 0:
wts_applied_on_inputs = [(num_outputs, input_size)] * num_gates
else:
if is_bidi:
wts_applied_on_inputs = [(num_outputs, 2 * num_outputs)] * num_gates
else:
wts_applied_on_inputs = [(num_outputs, num_outputs)] * num_gates
wts_applied_on_hidden_states = [(num_outputs, num_outputs)] * num_gates
tf_wts = wts_applied_on_inputs + wts_applied_on_hidden_states
return tf_wts if not is_bidi else tf_wts * 2
def canonical_weight_shapes_fn(input_size, direction, layers, num_outputs, rnn_mode=CUDNN_LSTM):
shapes = []
for layer in range(layers):
shapes.extend(canonical_weight_shape_fn(input_size, direction, layer, num_outputs, rnn_mode=rnn_mode))
return shapes
def zero_state(num_layers, num_dirs, num_units, batch_size, dtype):
res = []
for sp in state_shape(
num_layers=num_layers,
num_dirs=num_dirs,
num_units=num_units,
batch_size=batch_size):
res.append(tf.zeros(sp, dtype=dtype))
return tuple(res)
def variational_lstm(inputs, num_units, scope, vparams: VariationalParams, num_layers=1,
direction=CUDNN_RNN_BIDIRECTION, sequence_lengths=None, rnn_mode="lstm",
kernel_initializer=None, bias_initializer=None, initial_state=None, dropout=0.0, is_training=True):
if rnn_mode == 'cudnn_lstm':
rnn_mode = CUDNN_LSTM
elif rnn_mode == 'cudnn_gru':
rnn_mode = CUDNN_GRU
elif rnn_mode == 'cudnn_rnn_tanh':
rnn_mode = CUDNN_RNN_TANH
elif rnn_mode == 'cudnn_rnn_relu':
rnn_mode = CUDNN_RNN_RELU
else:
raise ValueError()
with tf.variable_scope(scope):
dtype = tf.float32
batch_size = tf.shape(inputs)[1]
input_shape = inputs.shape
if input_shape.ndims != 3:
raise ValueError("Expecting input_shape with 3 dims, got %d" %
input_shape.ndims)
if input_shape[-1].value is None:
raise ValueError("The last dimension of the inputs to `CudnnRNN` "
"should be defined. Found `None`.")
input_size = input_shape[-1].value
if kernel_initializer is None:
kernel_initializer = tf.initializers.glorot_uniform(dtype=tf.float32)
if bias_initializer is None:
bias_initializer = tf.initializers.constant(0.0, dtype=tf.float32)
canonical_weight_shapes = canonical_weight_shapes_fn(
input_size=input_size,
direction=direction,
layers=num_layers,
num_outputs=num_units,
rnn_mode=rnn_mode)
canonical_bias_shapes = canonical_bias_shapes_fn(
direction=direction,
num_outputs=num_units,
layers=num_layers,
rnn_mode=rnn_mode)
weights = [
kernel_initializer(sp, dtype=dtype)
for sp in canonical_weight_shapes
]
biases = [
bias_initializer(sp, dtype=dtype)
for sp in canonical_bias_shapes
]
print(canonical_weight_shapes)
print(canonical_bias_shapes)
print(len(canonical_weight_shapes))
print(len(canonical_bias_shapes))
opaque_params_t = cudnn_rnn_canonical_to_opaque_params(
rnn_mode=rnn_mode,
num_layers=num_layers,
num_units=num_units,
input_size=input_size,
weights=weights,
biases=biases,
input_mode=CUDNN_INPUT_LINEAR_MODE,
direction=direction,
dropout=0,
seed=0,
name=None)
count = 0
for weight in weights:
for s in weight.shape:
assert s
count += np.product([s.value for s in weight.shape])
for bias in biases:
for s in bias.shape:
assert s
count += np.product([s.value for s in bias.shape])
print("Count: {}".format(count))
tfcount = tf.constant([count])
with tf.control_dependencies([tf.assert_equal(tf.shape(opaque_params_t), tfcount)]):
opaque_params_t = tf.identity(opaque_params_t)
print("opaque_params_t: {}".format(opaque_params_t))
opaque_params_t.set_shape([count])
print("opaque_params_t: {}".format(opaque_params_t))
opaque_params = get_variable(
shape=opaque_params_t.shape,
name='kernel',
initializer=opaque_params_t,
vparams=vparams
)
print("LSTM Kernel: {}".format(opaque_params))
if initial_state is not None and not isinstance(initial_state, tuple):
raise TypeError("Invalid initial_state type: %s, expecting tuple." %
initial_state)
num_dirs = 2 if direction == CUDNN_RNN_BIDIRECTION else 1
if initial_state is None:
initial_state = zero_state(
num_layers=num_layers,
num_dirs=num_dirs,
num_units=num_units,
batch_size=batch_size,
dtype=dtype)
h, c = initial_state # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
output, output_h, output_c = _cudnn_rnn( # pylint:disable=protected-access
inputs=inputs,
input_h=h,
input_c=c,
rnn_mode=rnn_mode,
params=opaque_params,
is_training=is_training,
sequence_lengths=sequence_lengths,
time_major=True,
input_mode=CUDNN_INPUT_LINEAR_MODE,
direction=direction,
dropout=dropout)
return output, (output_h, output_c)
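# Usage sketch (time-major inputs of shape [max_time, batch, features]); `vparams`
# is a VariationalParams instance from variational_variable, and the remaining
# values below are illustrative assumptions rather than fixed choices:
#   outputs, (h, c) = variational_lstm(inputs, num_units=320, scope="lstm_stack",
#                                      vparams=vparams, num_layers=3,
#                                      sequence_lengths=lengths, rnn_mode="cudnn_lstm")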
| [
"[email protected]"
] | |
51b07ea4cce1f767c1f1036d35ce7a4029d6df0c | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /jowQ2aeZut4vGyHyP_16.py | aed6f0c2cd64adca3d3aec4b15d1453381e8ff2e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | """
Given the slope of a line, calculate and return the angle of the line
(relative to the y-axis). For example, a horizontal line would be 90 degrees.
### Examples
convert(1) ➞ 45
convert(0) ➞ 90
convert(-1) ➞ 135
### Notes
* All values returned should be in degrees.
* All values returned should be rounded to the nearest whole number.
* The value to return must be strictly between 0 and 180.
* All inputs will be valid integer values.
"""
import math
def convert(slope):
return round(abs(math.degrees(math.atan(slope)) - 90))
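# Sanity check against the docstring examples:
#   convert(1) -> 45, convert(0) -> 90, convert(-1) -> 135
# atan(slope) is the angle from the x-axis, so subtracting 90 degrees and taking the
# absolute value re-expresses it relative to the y-axis.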
| [
"[email protected]"
] | |
ef634e626382062978a0617650e14dfcc4008268 | 3c83887e165bfde579202b3867e900fdc7ba52f9 | /Ejercicio23.py | 7e1342fae3e87f0979cb3f9f81364a846238dcd2 | [] | no_license | JuanMacias07x/30abril | e545c3c1684d8a72af316ca8f6eb52d9cc628df5 | efbe23ff233de075d201d08057ae064a6f8d678e | refs/heads/main | 2023-04-28T10:52:22.423791 | 2021-04-30T23:42:05 | 2021-04-30T23:42:05 | 362,258,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | #Programa que lea un número e indique si este es par-positivo, par-negativo, impar-positivo o impar-negativo
#Definir variables
number = int(input("Ingrese un número = "))
if ( number >= 0) and (number % 2 == 0):
print("El número" + " " + str(number) + " " + "es positivo y par." + "\n")
else:
if (number < 0) and (number % 2 == 0):
print("El número" + " " + str(number) + " " + "es negativo y par." + "\n")
else:
if (number >= 0) and (number % 2 != 0):
print("El número" + " " + str(number) + " " + "es positivo e impar." + "\n")
else:
if (number < 0) and (number % 2 != 0):
print("El número" + " " + str(number) + " " + "es negativo e impar." + "\n")
print("¡El programa ha finalizado con éxito!" + "\n")
| [
"[email protected]"
] | |
37b05a9678e11847749bff59acef0e2d5c802387 | c0ef68e572c27d8cd2cc24a08f2f24e63b45e3be | /security app.py | e2052130b23f775c3a7c3ccde4262cfc3783b669 | [] | no_license | sprinzs123/motion-camera-with-database-and-notification | 9952f9f82b7318e8f9379d1e7264a3446c28de44 | 8af0fb41d8f60d79099db73aba4e7ccf39c5ec0a | refs/heads/master | 2020-07-17T06:28:24.622789 | 2019-09-05T17:38:00 | 2019-09-05T17:38:00 | 205,966,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,728 | py | import cv2
import datetime as datetime
import pandas
import smtplib
# create table where we record when movement was present from start to finish
status_list = [None, None]
times = []
df = pandas.DataFrame(columns=["Start", "End"])
# declare what is out first frame is
first_frame = None
video = cv2.VideoCapture(0)
# format and name of the video
# we set FPS and resolution size of video
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter('output.avi', fourcc, 24, (640, 480))
# start our recording loop
while video.isOpened():
status = 0
ret, frame = video.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21,), 0)
    # use a grayscale, blurred version of the original frame because it is more
    # reliable for motion detection and object recognition
    # assign the initial frame that the rest of the frames will be compared against
if first_frame is None:
first_frame = gray
continue
# make our recordings from where we determine if movement happened or not
delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# add timestamp on the video
# .putText is responsible for formation of our time stamp
font = cv2.FONT_HERSHEY_PLAIN
text = str(datetime.datetime.now())
cv2.putText(frame, text, (5, 25), font, 1, (0, 225, 225), 1)
    # detect motion and write frames to the output file while motion is present
    # the status value is used to determine when the motion started or ended
    # out.write records video only when motion is present
    for contour in cnts:
        # only treat sufficiently large contours as motion; smaller ones are noise
        if cv2.contourArea(contour) > 700:
            status = 1
            out.write(frame)
cv2.imshow('frame', frame)
print(status)
    # function to send an email notification when motion is detected
    # a sender email account must be configured for this to work
    # tutorial this is based on: https://youtu.be/Bg9r_yLk7VY?t=518
def send_mail():
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login("[email protected]", "password")
subject = 'movement was detected'
body = 'check the stream'
msg = f"SSubject: {subject}\n\n{body}"
server.sendmail(
"[email protected]",
"[email protected]",
msg
)
server.quit()
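        # Note: a real Gmail account normally needs an app password (or OAuth2) for
        # SMTP logins like this; the address and password above are placeholders.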
    # record when movement occurred:
    # compare the status variable and note where it changes from 0 to 1 and from 1 to 0
    # an email notification is also sent when motion starts
status_list.append(status)
if status_list[-1] == 1 and status_list[-2] == 0:
times.append(datetime.datetime.now())
send_mail()
if status_list[-1] == 0 and status_list[-2] == 1:
times.append(datetime.datetime.now())
# close our application and update our list where we write end and start of the movement
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
if status == 1:
times.append(datetime.datetime.now())
break
# write the timestamps of when movement happened into a CSV table
# we iterate through the list because the format of the list is
# list = [start time, end time, start time, end time]
for i in range(0, len(times), 2):
df = df.append({"Start": times[i], "End": times[i+1]}, ignore_index=True)
df.to_csv("records of movement.csv")
# end everything when done with recording/showing image
video.release()
out.release()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
b54f7a1f838bdb2f2668477b3565be3399a8fcdf | d07834620562760dda49a03bbd2da88d7b2b8b6a | /Tutorial/jjj7.py | ebc859601d19c2b6dd11e89b8faf5fb299847415 | [] | no_license | ebylaska/ML_Potentials | d2eb888ed94fa9a36aeabd00fb721a5653e89bda | fd1b8fd5bd18763b000a121d60da5fd920926875 | refs/heads/master | 2022-04-30T09:44:41.269965 | 2022-04-11T16:22:20 | 2022-04-11T16:22:20 | 159,248,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,160 | py | import myfeedforward5 as myfeedforward
import math,random
import xyplotter
def plote(plot,xdat,edat,machine,weights):
x1 = []
y = []
y1 = []
dy = 0.1
xmin = 99999.9
xmax = -99999.9
ymin = 99999.9
ymax = -99999.9
for i in range(len(xdat)):
x = xdat[i]
e = edat[i]
e1 = machine.evaluate([x],weights)[0]
if (x < xmin): xmin = x
if (x > xmax): xmax = x
if (e < ymin): ymin = e
if (e1 < ymin): ymin = e1
if (e > ymax): ymax = e
if (e1 > ymax): ymax = e1
x1.append(x)
y.append(e)
y1.append(e1)
plot.resetwindow(xmin,ymin-dy,xmax,ymax+dy,"Spring Energies")
plot.plot(x1,y,"black")
plot.plot(x1,y1,"blue")
alpha0 = 0.01
alpha = 0.001
beta1 = 0.9
beta2 = 0.999
eps = 1e-8
A = 0.2
xdat = []
edat = []
for i in range(50):
x = 8.0*i/49.0
e = A*(x*x - 0.1*x**6 + 0.5*(x-4)**8)
xdat.append(x)
edat.append(e)
beta = 1.0
#sigmoid = lambda x: 1.0/(1.0+math.exp(-x))
#sigmoidp = lambda x: math.exp(-x)/(1.0+math.exp(-x))**2
#sigmoidpp = lambda x: math.exp(-x)*(math.exp(-x)-1.0)/(1.0+math.exp(-x))**3
ap = 1.0
xp = 2.5
bp = 3.0
penalty = lambda x: ap*(0.5*(math.tanh(bp*(x-xp)) - math.tanh(bp*(x+xp))) + 1.0)
penaltyp = lambda x: ap*0.5*bp*( (1/math.cosh(bp*(x-xp)))**2 - (1.0/math.cosh(bp*(x+xp)))**2)
sigmoid = lambda x: 0.5*(math.tanh(beta*x)+1.0)
sigmoidp = lambda x: 0.5*beta*(1.0/math.cosh(beta*x))**2
sigmoidpp = lambda x: 0.5*(-2.0)*beta*beta*math.tanh(beta*x)*(1.0/math.cosh(beta*x))**2  # sech(x)**2 written as (1/cosh(x))**2; math has no sech
xmoid1 = lambda x: x
xmoidp1 = lambda x: 1.0
xmoidpp1 = lambda x: 0.0
#xmoid2 = lambda x: x*x * 0.5
#xmoidp2 = lambda x: x
#xmoidpp2 = lambda x: 1.0
#xmoid3 = lambda x: x*x*x * (1.0/6.0)
#xmoidp3 = lambda x: 3*x*x * (1.0/6.0)
#xmoidpp3 = lambda x: 6*x * (1.0/6.0)
#
#xmoid4 = lambda x: x*x*x*x * (1.0/24.0)
#xmoidp4 = lambda x: 4*x*x*x * (1.0/24.0)
#xmoidpp4 = lambda x: 12*x*x * (1/0/24.0)
#bias = [[0.01],[0.01],[0.001],[0.0001],[0.00001],[0.0000001]]
bias = [[0.01],[0.01],[0.001]]
bias = []
machine = myfeedforward.MyFeedForward([1,15,30,1],[xmoid1,sigmoid,sigmoid,xmoid1],[xmoidp1,sigmoidp,sigmoidp,xmoidp1],[xmoidpp1,sigmoidpp,sigmoidpp,xmoidpp1],bias)
#machine = myfeedforward.MyFeedForward([1,13,26,52,1],[xmoid1,sigmoid,sigmoid,sigmoid,xmoid1],[xmoidp1,sigmoidp,sigmoidp,sigmoidp,xmoidp1],[xmoidpp1,sigmoidpp,sigmoidpp,sigmoidpp,xmoidpp1],bias)
#machine = myfeedforward.MyFeedForward([1,52,1],[xmoid1,sigmoid,xmoid1],[xmoidp1,sigmoidp,xmoidp1],[xmoidpp1,sigmoidpp,xmoidpp1],bias)
#machine = myfeedforward.MyFeedForward([1,1,1,1,1,1,1],[sigmoid,sigmoid,sigmoid,sigmoid,xmoid3,xmoid2,xmoid1],[sigmoidp,sigmoidp,sigmoidp,sigmoidp,xmoidp3,xmoidp2,xmoidp1],[sigmoidpp,sigmoidpp,sigmoidpp,sigmoidpp,xmoidpp3,xmoidpp2,xmoidpp1],bias)
#machine = myfeedforward.MyFeedForward([1,1,1,1,1,1],[sigmoid,sigmoid,sigmoid,sigmoid,xmoid2,xmoid1],[sigmoidp,sigmoidp,sigmoidp,sigmoidp,xmoidp2,xmoidp1],[sigmoidpp,sigmoidpp,sigmoidpp,sigmoidpp,xmoidpp2,xmoidpp1],bias)
weights = machine.initial_w()
for i in range(len(weights)):
weights[i] *= 1.0
nw = len(weights)
print "weights=",weights
m = [0.0]*len(weights)
v = [0.0]*len(weights)
beta1t = 1.0
beta2t = 1.0
plot = xyplotter.xyplotter(-1.0,0.0,1.0,1.0, "Spring Energies",4)
plote(plot,xdat,edat,machine,weights)
enter0 = raw_input(" -- start simulation --- ")
ii = 0
for i in range(1000000):
#x = 4.0*random.random()-2.0
#ii = random.randint(0,len(xdat)-1)
x = xdat[ii]
e = edat[ii]
ii = ((ii+1) % len(xdat))
gg = machine.w_energy_gradient([x],[e],weights)
error = gg[0]
g1 = gg[1]
for j in range(1,nw-1):
error += penalty(weights[j])
g1[j] += penaltyp(weights[j])
beta1t *= beta1
beta2t *= beta2
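    # Adam bias correction folded into the step size: alphat = alpha*sqrt(1-beta2^t)/(1-beta1^t),
    # so the update below can use m/(sqrt(v)+eps) without correcting m and v separately.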
alphat = alpha*math.sqrt(1.0-beta2t)/(1.0-beta1t)
for j in range(nw):
m[j] = beta1*m[j] + (1.0-beta1)*g1[j]
v[j] = beta2*v[j] + (1.0-beta2)*g1[j]*g1[j]
weights[j] -= alphat*m[j]/(math.sqrt(v[j]) + eps)
if ((i%10000)==0):
e1 = machine.evaluate([x],weights)[0]
print i,x,e,e1,error
plote(plot,xdat,edat,machine,weights)
enter1 = raw_input(" -- finished --- ")
| [
"[email protected]"
] | |
acf597c3d2cbf06dc66442be2a4640a76659f173 | 10388f885dd0a671a39007a53f16d6f3b7e25bcf | /modules/users/models.py | 280c6d1478318dd785d6e08cb98b1c7434d435d4 | [] | no_license | jonatan-castaneda/minihack | 85aae66078710adfbfcbbb25574ae9dc4123c193 | f3b4a29e36ec19f6509a8fdff52420d82a464a72 | refs/heads/master | 2020-12-30T16:46:59.912002 | 2017-05-12T15:58:05 | 2017-05-12T15:58:05 | 91,030,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
class UserManager(BaseUserManager, models.Manager):
def _create_user(self, email, password,
is_staff, is_superuser, **extra_fields):
if not email:
            raise ValueError('An email address is required')
email = self.normalize_email(email)
user = self.model(email=email,
is_active=True, is_staff=is_staff,
is_superuser=is_superuser, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
return self._create_user(
email, password, False, False, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(
email, password, True, True, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin, models.Model):
    # unique, values will not repeat
id = models.AutoField(primary_key=True, unique=True)
nombre = models.CharField(max_length=40)
apellidos = models.CharField(max_length=25)
telefono = models.CharField(max_length=22)
email = models.EmailField(unique=True, max_length=50)
sexo = models.CharField(choices=(('M','Mujer'),('H','Hombre')), max_length=16,blank=True)
    # custom manager wired in as the object manager for this model
objects = UserManager()
is_active = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
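    # Note (assumption about the wider project): Django only uses this custom model if
    # settings point at it, e.g. AUTH_USER_MODEL = "users.User" (app label may differ).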
def get_short_name(self):
return self.nombre | [
"[email protected]"
] | |
144bee093a0e15f85d29ad68dcccaa148384f3b6 | ca660d6e66838b002114b52ae9c221fed6b1dac1 | /backend/artworks/migrations/0009_auto_20210417_2331.py | c304c10c54ad98d31cdf05d6fbdcadeb30527ce4 | [
"MIT"
] | permissive | ehsan-g/seestan | b9bf5df6bd964555c5bacdf35adb2ac076d36521 | 97cdaac3799f69bbfe444e8b2deb8cc5585f7b77 | refs/heads/main | 2023-05-22T14:02:57.434829 | 2021-06-12T06:36:08 | 2021-06-12T06:36:08 | 343,962,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # Generated by Django 3.1.7 on 2021-04-17 19:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('artworks', '0008_auto_20210417_2326'),
]
operations = [
migrations.AlterField(
model_name='artwork',
name='title',
field=models.CharField(blank=True, default='no title', max_length=200, null=True),
),
]
| [
"[email protected]"
] | |
fdbeadd0109eeff3e1117d28879198a2bd435f01 | 420ce17a31e2685bf3bdff3a827c8a5c0112ab34 | /week1/day5/classExercisesWithFunctions.py | 733e00916b2a375d56022e58e73f98568505a893 | [] | no_license | wsvoboda/digitalcrafts-03-2021 | b384358e328e0a7ed2a9ea2de42dcc3c1c6cbe59 | 815fa44f9507ab7de0850948dd47b71127db9274 | refs/heads/master | 2023-06-02T11:06:13.447798 | 2021-06-16T02:18:00 | 2021-06-16T02:18:00 | 346,116,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | ### First Exercise - divide a number by 2 and give response ###
def printANumber():
number = input("Give me a number and I will divide it by 2! ")
product = int(number) // 2
return print("The response is %s." % product)
printANumber()
### Second Exercise - ask for 2 strings and print them together ###
def addStringsAndPrint():
string1 = input("What is your first name? ")
string2 = input("What is your last name? ")
return print("Your full name is %s %s." % (string1, string2))
addStringsAndPrint()
### Third Exercse - check the length of a string and print it ###
def lengthOfString():
string = input("Enter a word or phrase and I'll tell you how many characters are in it! ")
lengthIs = len(string)
return print("Your entry is %i characters long." % lengthIs)
lengthOfString()
### Fourth Exercise - take 2 numbers and add them ###
def addition():
print("Give me 2 numbers and I will add them together!\n")
num1 = int(input("What is your first number? "))
num2 = int(input("What is your second number? "))
sumOfNums = num1 + num2
return print("The sum of your two numbers is %i." % sumOfNums)
addition()
| [
"[email protected]"
] |