metadata | text
---|---
{
"source": "221BBakerSt/django_to_heroku",
"score": 2
} |
#### File: django_to_heroku/album/models.py
```python
from django.db import models
from PIL import Image
from django.utils import timezone
class Slide(models.Model):
description = models.CharField(max_length=100, blank=True, null=True)
slide_pic = models.ImageField(upload_to="slide", blank=True, null=True, default=False)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = Slide.objects.get(id=self.id)
except Slide.DoesNotExist:
# object is not in db, nothing to worry about
return "Slide.DoesNotExist"
# is the save due to an update of the actual image file?
if obj.slide_pic and self.slide_pic and obj.slide_pic != self.slide_pic:
# delete the old image file from the storage in favor of the new file
obj.slide_pic.delete()
def delete(self, *args, **kwargs):
# object is being removed from db, remove the file from storage first
self.slide_pic.delete()
return super(Slide, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(Slide, self).save(*args, **kwargs)
def __str__(self):
return self.description
class Meta:
# name the table in db
db_table = "slide"
class Gallery(models.Model):
cover = models.ImageField(upload_to="gallery", blank=True, null=True, default=False)
name = models.CharField(max_length=50)
description = models.CharField(max_length=100, blank=True, null=True)
# the date will be shown on blog page
timestamp = models.DateField(default=timezone.now)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = Gallery.objects.get(id=self.id)
except Gallery.DoesNotExist:
# object is not in db, nothing to worry about
return "Gallery.DoesNotExist"
# is the save due to an update of the actual image file?
if obj.cover and self.cover and obj.cover != self.cover:
# delete the old image file from the storage in favor of the new file
obj.cover.delete()
def delete(self, *args, **kwargs):
# object is being removed from db, remove the file from storage first
self.cover.delete()
return super(Gallery, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(Gallery, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Meta:
# name the table in db
db_table = "gallery"
verbose_name_plural = "galleries"
ordering=["timestamp", "name"]
class Photo(models.Model):
photo_pic = models.ImageField(upload_to="photo", blank=True, null=True, default=False)
description = models.CharField(max_length=200, blank=True, null=True)
gallery = models.ForeignKey(Gallery, on_delete=models.DO_NOTHING)
# the date will be shown on blog page
timestamp = models.DateField(default=timezone.now)
# DateTime only used to sort, not shown on blog page
time = models.DateTimeField(default=timezone.now)
view_count = models.IntegerField(default=0)
comment_count = models.IntegerField(default=0)
like_count = models.IntegerField(default=0)
def remove_on_image_update(self):
try:
# is the object in the database yet?
obj = Photo.objects.get(id=self.id)
except Photo.DoesNotExist:
# object is not in db, nothing to worry about
return "Photo.DoesNotExist"
# is the save due to an update of the actual image file?
if obj.photo_pic and self.photo_pic and obj.photo_pic != self.photo_pic:
# delete the old image file from the storage in favor of the new file
obj.photo_pic.delete()
def delete(self, *args, **kwargs):
# object is being removed from db, remove the file from storage first
self.photo_pic.delete()
return super(Photo, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
# object is possibly being updated, if so, clean up.
self.remove_on_image_update()
return super(Photo, self).save(*args, **kwargs)
def __str__(self):
if self.description:
return self.description
else:
return "untitled"
class Meta:
# name the table in db
db_table = "photo"
ordering=["time"]
```
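All three models above share the same pattern: `save()` calls `remove_on_image_update()` to delete the stale file when the image field changes, and `delete()` removes the file before the database row. A minimal usage sketch, assuming Django is configured and the `album` app is installed (the file names below are illustrative, not from the repo):
```python
from album.models import Slide

# create a slide, then swap its image; the overridden save() removes the old file
slide = Slide.objects.create(description="Home page banner", slide_pic="slide/banner_v1.jpg")
slide.slide_pic = "slide/banner_v2.jpg"   # hypothetical new upload
slide.save()                              # banner_v1.jpg is deleted from storage
slide.delete()                            # the row and banner_v2.jpg are both removed
```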
#### File: django_to_heroku/album/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404
from django.http import request, HttpResponse
from django.core.paginator import *
from .models import *
# the number of photos shown on every page
NUM_ON_EACH_PAGE = 16
def album(request):
slide_pics = Slide.objects.all()
galleries = Gallery.objects.all()
context = {
"slide_pics": slide_pics,
"galleries": galleries,
}
return render(request, "album.html", context)
def gallery(request, name):
gallery = get_object_or_404(Gallery, name=name)
photos = Photo.objects.filter(gallery__name=name)
""" beginning of pagination process """
# set the number of photos shown on every page
paginator = Paginator(photos, NUM_ON_EACH_PAGE)
# set the page request variable
page_request_var = "page"
# get the page number in URL parameter
page_num = request.GET.get(page_request_var)
if page_num is None or page_num == "":
page_num = 1
elif isinstance(page_num, str):
page_num = int(page_num)
paginated_photos = paginator.get_page(page_num)
# display range: [current_page - 2, current_page + 2]
page_range = list(range(max(page_num - 2, 1), page_num)) + list(range(page_num, min(page_num + 2, paginator.num_pages) + 1))
""" end of pagination process """
context = {
"gallery": gallery,
"photos": paginated_photos,
"page_range": page_range,
"last_page": paginator.num_pages,
"page_request_var": page_request_var,
}
return render(request, "gallery.html", context)
``` |
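The pagination block above shows a window of at most five page numbers centred on the current page. The same logic as a standalone sketch (plain Python, no Django needed; the function name is ours):
```python
def page_window(current, last, radius=2):
    # clamp [current - radius, current + radius] to the valid range [1, last]
    return list(range(max(current - radius, 1), min(current + radius, last) + 1))

print(page_window(1, 10))   # [1, 2, 3]
print(page_window(5, 10))   # [3, 4, 5, 6, 7]
print(page_window(10, 10))  # [8, 9, 10]
```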
{
"source": "221B-Haibara/IcecubePepeFit",
"score": 3
} |
#### File: IcecubePepeFit/writer/shelvewriter.py
```python
def write_shelve(outputshelve,index, gamma,norm,cutoff,LLH):
data = {}
#data["gamma"] = float(gamma)
data["norm"] = float(norm)
data["cutoff"]= cutoff
data["gamma"]= gamma
data["LLHR"]= LLH
outputshelve[index] = data
outputshelve.close()
``` |
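`write_shelve` expects an already-open shelf object and closes it after a single write, so each call pairs with one `shelve.open`. A hedged usage sketch (file name, key and values are illustrative):
```python
import shelve

# store one fit result under the key "bin_0"; write_shelve closes the shelf itself
shelf = shelve.open("fit_results")
write_shelve(shelf, "bin_0", gamma=2.0, norm=1e-18, cutoff=1e6, LLH=12.3)

# reading it back requires reopening the file
with shelve.open("fit_results") as shelf:
    print(shelf["bin_0"])   # {'norm': 1e-18, 'cutoff': 1000000.0, 'gamma': 2.0, 'LLHR': 12.3}
```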
{
"source": "2221mb/SANUS-WEB",
"score": 3
} |
#### File: SANUS-WEB/Backend/watershed_algo.py
```python
import cv2 as cv
import numpy as np
from PIL import Image
def watershed(fileName):
img = cv.imread(fileName,1)
Img = np.array(img)
gray = cv.cvtColor(np.array(img), cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
kernel = np.ones((3, 3), np.uint8)
opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)
sure_bg = cv.dilate(opening, kernel, iterations=3)
dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)
ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg, sure_fg)
ret, markers = cv.connectedComponents(sure_fg)
markers = markers + 1
markers[unknown == 255] = 0
markers = cv.watershed(Img, markers)
Img[markers == -1] = [255, 0, 0]
tumorImage = cv.cvtColor(Img, cv.COLOR_HSV2BGR)
ws = Image.fromarray(tumorImage, 'RGB')
ws.save("static/"+ str(fileName.split("/")[-1].split(".")[0])+'_rgb.jpg')
# ws.show()
return str(fileName.split("/")[-1].split(".")[0])+'_rgb.jpg'
# return ws
# cv.imshow('tumor_image',tumorImage)
# k=cv.waitKey(0)
# cv.destroyAllWindows()
``` |
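A short usage sketch for the function above (the input path is illustrative; note that the function writes its output under a `static/` directory that must already exist and returns only the output file name):
```python
# segment one image and report where the annotated copy was written
out_name = watershed("uploads/mri_scan_01.jpg")   # hypothetical input path
print("result saved to", "static/" + out_name)
```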
{
"source": "2222min/mobileproject",
"score": 3
} |
#### File: mobileproject/websocket_test/websocket_client_chat2.py
```python
from socket import *
from select import select
import sys
# Host, port and buffer size
HOST = '192.168.0.5'
PORT = 8000
BUFSIZE = 1024
ADDR = (HOST, PORT)
# Create a socket object
clientSocket = socket(AF_INET, SOCK_STREAM)
# Try to connect to the server
try:
clientSocket.connect(ADDR)
except Exception as e:
print('Could not connect to chat server (%s:%s).' % ADDR)
sys.exit()
print('Connected to chat server (%s:%s).' % ADDR)
def prompt():
sys.stdout.write('<Me> ')
sys.stdout.flush()
# Start the main loop
while True:
try:
connection_list = [sys.stdin, clientSocket]
read_socket, write_socket, error_socket = select(connection_list, [], [], 10)
for sock in read_socket:
if sock == clientSocket:
data = sock.recv(BUFSIZE)
if not data:
print('Connection to chat server (%s:%s) was lost.' % ADDR)
clientSocket.close()
sys.exit()
else:
print('%s' % data)  # the message timestamp follows the server's clock
prompt()
else:
message = sys.stdin.readline()
clientSocket.send(message)
prompt()
# note: select() returns plain lists, so there is nothing to close here
except KeyboardInterrupt:
clientSocket.close()
sys.exit()
```
#### File: mobileproject/websocket_test/websocket_server_chat.py
```python
import socket
import select
import re
import base64
import hashlib
import struct
import sys
#
# from signal import signal, SIGPIPE, SIG_DFL
#
# signal(SIGPIPE, SIG_DFL)
global server
global input_list
global client_info
def send(input_list, msg):
try:
data = bytearray(msg.encode('utf-8'))
# When the payload length field is 126, extended_payload_len takes 2 bytes, so its maximum value is 65535
if len(data) > 65535:
frame = bytearray([b'\x81', 127]) + bytearray(struct.pack('>Q', len(data))) + data
elif len(data) > 125:
frame = bytearray([b'\x81', 126]) + bytearray(struct.pack('>H', len(data))) + data
else:
frame = bytearray([b'\x81', len(data)]) + data
# Send to every client in the list (excluding the server socket and sys.stdin)
for client in [sock for sock in input_list if not sock == server and not sock == sys.stdin]: client.sendall(
frame)
except Exception as e:
print("send ERROR : " + str(e))
def recv(client):
first_byte = bytearray(client.recv(1))[0]
second_byte = bytearray(client.recv(1))[0]
FIN = (0xFF & first_byte) >> 7
opcode = (0x0F & first_byte)
mask = (0xFF & second_byte) >> 7
payload_len = (0x7F & second_byte)
if opcode < 3:
# Work out payload_len
if payload_len == 126:
payload_len = struct.unpack_from('>H', bytearray(client.recv(2)))[0]
elif payload_len == 127:
payload_len = struct.unpack_from('>Q', bytearray(client.recv(8)))[0]
# Read the masking key and unmask the payload (present only when mask == 1)
if mask == 1:
masking_key = bytearray(client.recv(4))
masked_data = bytearray(client.recv(payload_len))
data = [masked_data[i] ^ masking_key[i % 4] for i in range(len(masked_data))]
else:
data = bytearray(client.recv(payload_len))
else:
return opcode, bytearray(b'\x00') # pass the opcode value back
print(bytearray(data).decode('utf-8', 'ignore')) # echo the received payload to the console
return opcode, bytearray(data)
def handshake(client):
try:
request = client.recv(2048)
m = re.match('[\\w\\W]+Sec-WebSocket-Key: (.+)\r\n[\\w\\W]+\r\n\r\n', request)
key = m.group(1) + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
response = "HTTP/1.1 101 Switching Protocols\r\n" + \
"Upgrade: websocket\r\n" + \
"Connection: Upgrade\r\n" + \
"Sec-WebSocket-Accept: %s\r\n" + \
"\r\n"
r = response % ((base64.b64encode(hashlib.sha1(key).digest()),))
client.send(r)
print("---handshake end!---")
except Exception as e:
print("handshake ERROR : " + str(e))
def handler_client(client):
try:
opcode, data = recv(client)
if opcode == 0x8:
print('close frame received')
input_list.remove(client)
return
elif opcode == 0x1:
if len(data) == 0:
# input_list.remove(client)
# return
print("emty data")
else:
# msg = data.decode('utf-8', 'ignore')
msg = [c[1][0] + ":" + str(c[1][1]) for c in client_info if c[0] == client][0] + " :: " + data.decode(
'utf-8', 'ignore')
# Pass the client list as the argument
send(input_list, msg)
else:
print('frame not handled : opcode=' + str(opcode) + ' len=' + str(len(data)))
except Exception as e:
print("handler ERROR : " + str(e))
print("disconnected")
input_list.remove(client)
def start_server(host, port):
try:
# host = '192.168.0.5'
# port = 8000
global server
global input_list
global client_info
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((host, port))
server.listen(0)
input_list = [server, sys.stdin] # sys.stdin is included so console input can be forwarded to clients
client_info = []
print("Server : " + host + ":" + str(port))
while True:
# select() takes the read, write and except lists to watch as arguments
# and returns the read, write and except lists that are ready.
# It watches the sockets in input_list for incoming data;
# in other words, it checks whether any socket in input_list is ready to read.
input_ready, write_ready, except_ready = select.select(input_list, [], [], 10)
# Process the returned read list
for ir in input_ready:
# A new client has connected; handle it
if ir == server:
client, addr = server.accept()
print("connected : " + str(addr))
handshake(client)
# Add it to input_list so its incoming data is watched
input_list.append(client)
client_info.append((client, addr))
# Console input: forward the typed data to the clients
elif ir == sys.stdin:
send(input_list, "Administrator :: " + sys.stdin.readline())
# Data arrived on a client socket
else:
handler_client(ir)
except Exception as e:
print("start_server ERROR " + str(e))
server.close()
sys.exit()
except KeyboardInterrupt:
# Shut down gracefully
print("Terminated by keyboard interrupt")
server.close()
sys.exit()
start_server('192.168.0.5', 8000)
``` |
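The handshake above builds the Sec-WebSocket-Accept value by appending the fixed GUID to the client's Sec-WebSocket-Key and base64-encoding the SHA-1 digest. A minimal Python 3 sketch of just that step, using the sample key from RFC 6455:
```python
import base64
import hashlib

WS_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

def websocket_accept(sec_websocket_key):
    # accept = base64( SHA-1( key + GUID ) )
    digest = hashlib.sha1((sec_websocket_key + WS_GUID).encode("ascii")).digest()
    return base64.b64encode(digest).decode("ascii")

# the RFC 6455 example key should yield "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
print(websocket_accept("dGhlIHNhbXBsZSBub25jZQ=="))
```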
{
"source": "2231puppy/CircuitPython_RoboCloudLib",
"score": 2
} |
#### File: 2231puppy/CircuitPython_RoboCloudLib/robocloudlib.py
```python
__version__ = "0.0.1"
__repo__ = "https://github.com/2231puppy/Projectstida_CircuitPython_robocloud.git"
class Motors:
"""Motor control class for the RoboCloud"""
def __init__(self, digitalio, board):
self.pwma = digitalio.DigitalInOut(board.IO13)
self.pwmb = digitalio.DigitalInOut(board.IO12)
self.ina1 = digitalio.DigitalInOut(board.IO14)
self.ina2 = digitalio.DigitalInOut(board.IO15)
self.inb1 = digitalio.DigitalInOut(board.IO16)
self.inb2 = digitalio.DigitalInOut(board.IO17)
self.pwma.direction = digitalio.Direction.OUTPUT
self.pwmb.direction = digitalio.Direction.OUTPUT
self.ina1.direction = digitalio.Direction.OUTPUT
self.ina2.direction = digitalio.Direction.OUTPUT
self.inb1.direction = digitalio.Direction.OUTPUT
self.inb2.direction = digitalio.Direction.OUTPUT
def set_speeds(self, motora, motorb):
"""Set speeds for both motors. If speed is 0, it will stop.
Takes 2 arguments: Motor A speed and Motor B speed"""
if motora:
self.pwma.value = True
if motorb:
self.pwmb.value = True
if motora > 0:
self.ina1.value = True
self.ina2.value = False
elif motora < 0:
self.ina1.value = False
self.ina2.value = True
elif motora == 0:
self.ina1.value = False
self.ina2.value = False
if motorb > 0:
self.inb1.value = True
self.inb2.value = False
elif motorb < 0:
self.inb1.value = False
self.inb2.value = True
elif motorb == 0:
self.inb1.value = False
self.inb2.value = False
``` |
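A hedged usage sketch on a CircuitPython board (the pin names IO12 to IO17 come from the class above; whether they exist depends on the board):
```python
import board       # CircuitPython pin definitions
import digitalio   # CircuitPython digital I/O

from robocloudlib import Motors

motors = Motors(digitalio, board)
motors.set_speeds(1, -1)   # motor A forward, motor B reverse
motors.set_speeds(0, 0)    # both direction pins low; note the PWM pins stay high once enabled
```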
{
"source": "2231puppy/Hoist",
"score": 3
} |
#### File: Hoist/hoist/external_server.py
```python
import requests
class ExternalServer:
def __init__(self, ip: str, port: int):
self.ip: str = ip
self.port: int = port
self.url: str = f'http://{ip}:{port}'
self.base: str = self.url + '/hoist/send'
def send(self, message: str) -> str:
resp = requests.get(self.base, params = {'msg': message})
json: dict = resp.json()
return json['RESPONSE']
def check(self) -> bool:
try:
requests.get(self.base)
except:
return False
return True
``` |
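A hedged usage sketch (the address and message are illustrative, and the import path is inferred from the file layout; send() assumes the Hoist server answers GET /hoist/send with a JSON body that has a RESPONSE key):
```python
from hoist.external_server import ExternalServer

server = ExternalServer("127.0.0.1", 5000)   # hypothetical Hoist server address
if server.check():                           # any reachable endpoint counts as "up"
    print(server.send("hello"))              # GET /hoist/send?msg=hello -> json["RESPONSE"]
else:
    print("server unreachable")
```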
{
"source": "22388o/SecurityTokens",
"score": 3
} |
#### File: 22388o/SecurityTokens/script.py
```python
import greenaddress as gdk
import json
# To install GDK, download the GDK python wheel from:
# https://github.com/Blockstream/gdk/releases
# The 'cp' number refers to the python version you have.
# To install GDK, pip install the .whl file:
# pip install greenaddress-0.0.36-cp37-cp37m-linux_x86_64.whl
# GDK README and reference documentation:
# https://github.com/Blockstream/gdk
# https://gdk.readthedocs.io/en/latest/
def main():
# Our calls to GDK are wrapped in the gdk_wallet class, which should only be
# created using either create_new_wallet, login_with_mnemonic or
# login_with_pin methods. The example uses the live Liquid network.
# Initialize GDK.
gdk.init({})
# Wallet creation and login using Mnemonic
# ========================================
# To create a wallet with a Managed Assets account, pass a mnemonic
# into the following. You can generate a 24 word mnemonic yourself or
# have GDK generate it for you by leaving mnemonic as None.
# You can choose to create a wallet that's covered by 2FA or not.
# 2FA can be activated or deactivated at any point in time.
"""
wallet = gdk_wallet.create_new_wallet(create_with_2fa_enabled=False, mnemonic=None)
print(f'\nMnemonic: {wallet.mnemonic}')
"""
# To login to an existing wallet you can either use the mnemonic or pin.
# Later we'll see how to use a pin, for now we will use the mnemonic.
mnemonic = 'your twenty four word mnemonic goes here with single spaced words'
if not gdk.validate_mnemonic(mnemonic):
raise Exception("Invalid mnemonic.")
# Login to a GDK wallet session using the mnemonic.
wallet = gdk_wallet.login_with_mnemonic(mnemonic)
# We can now perform calls against the session, such as get balance for
# the logged in Blockstream AMP Managed Assets account.
balance = wallet.get_balance()
print(f'\n{json.dumps(balance, indent=4)}')
# Using a pin to encrypt the mnemonic and login
# =============================================
# You can also login using a pin. Setting the pin for the wallet returns
# encrypted data that is saved to file. When you login with the pin, the
# server will give you the key to decrypt the mnemonic which it uses to
# login. If the pin is entered incorrectly 3 times the server will delete
# the key and you must use the mnemonic to login.
"""
# Before setting the pin, login with the wallet's mnemonic.
wallet = gdk_wallet.login_with_mnemonic(mnemonic)
# Then set the pin for the wallet, this saves encrypted data to file.
# Don't use the example value below, set your own.
pin = 123456
# You only need to set the pin data once.
wallet.set_pin(mnemonic, pin)
# After setting the pin you can then login using pin and do not have to
# enter the mnemonic again. The pin is used to decrypt the local file.
wallet.login_with_pin(pin)
"""
# Two factor authorization
# ========================
# You can add Two Factor Authentication (2FA) to a wallet when you create
# it or enable or disable 2FA at a later date.
# Check the current 2FA status for the wallet.
twofactor_status = wallet.get_current_2fa_status()
print(f'\n{json.dumps(twofactor_status, indent=4)}')
# The example below will enable 2FA on an existing wallet and uses email by
# default, which you can amend if you want.
"""
try:
wallet.twofactor_auth_enabled(False)
except RuntimeError as e:
# Will error if 2FA is already enabled
print(f'\nError: {e}\n')
"""
# Getting notification data from GDK to obtain the last block height
# ==================================================================
# The fetch_block_height example shows how to handle notification events
# from Green by processing the notifications queue.
block_height = wallet.fetch_block_height()
print(f'\nCurrent Liquid block height: {block_height}')
# Getting a new address and understanding pointers
# ================================================
# The new address returned will be confidential, whereas GDK transactions
# will show the unconfidential address. For this reason, use the address
# 'pointer' to identify it in transactions. The pointer plus sub account
# index maps to a derivation path so you can use pointers within each
# sub account to link confidential and unconfidential addresses. Be sure
# to note that you must consider which sub account you are using when
# using the pointer as an identifier like this.
address_info = wallet.get_new_address()
print(f'Address: {address_info["address"]}')
print(f'Address pointer: {address_info["pointer"]}')
# Each call creates a new address/pointer pair for the user.
address_info = wallet.get_new_address()
print(f'Address: {address_info["address"]}')
print(f'Address pointer: {address_info["pointer"]}')
# Getting transaction data from Green using GDK
# =============================================
txs = wallet.get_wallet_transactions()
for tx in txs:
print(f'TRANSACTION ID : {tx["txhash"]}')
print(f'CONFIRMATION STATUS : {tx["confirmation_status"]}')
print(f'BLOCK HEIGHT : {tx["block_height"]}')
print(f'TYPE : {tx["type"]}')
print(f'INPUT COUNT : {len(tx["inputs"])}')
print(f'OUTPUT COUNT : {len(tx["outputs"])}\n')
# Sending assets
# ==============
# Please be aware that AMP issued assets are issued with a precision
# that affects how the number of sats sent are converted to the number
# of units of the asset itself. Please refer to the examples under
# 'precision' on the following page for more details and examples:
# https://docs.blockstream.com/blockstream-amp/api-tutorial.html#issuing-an-asset
# If the asset is registered with the Liquid Assets Registry you can
# check the precision using the following link, or check with the
# asset's issuer:
# https://blockstream.info/liquid/assets
amount_sat = 1
asset_id = 'asset id here'
address = 'destination address here'
txid = wallet.send_to_address(amount_sat, asset_id, address)
if txid:
print(f'\nTransaction sent. Txid: {txid}')
else:
print(f'\nTransaction failed. See error logging.')
class gdk_wallet:
"""Class method to create and return an instance of gdk_wallet"""
@classmethod
def create_new_wallet(cls, create_with_2fa_enabled, mnemonic=None):
self = cls()
# Create a new wallet with a Managed Assets account.
# You can pass in a mnemonic generated outside GDK if you want, or have
# GDK generate it for you by omitting it. 2FA is enabled if chosen and
# can be enabled/disabled at any point.
if not mnemonic:
self.mnemonic = gdk.generate_mnemonic()
# Set the network name to 'liquid' for the live Liquid network.
# There is currently no test Liquid network.
self.session = gdk.Session({'name': 'liquid'})
self.session.register_user({}, self.mnemonic).resolve()
self.session.login({}, self.mnemonic).resolve()
self.session.create_subaccount({'name': self.SUBACCOUNT_NAME, 'type': self.AMP_ACCOUNT_TYPE}).resolve()
if create_with_2fa_enabled:
self.twofactor_auth_enabled(True)
return self
"""Class method to create and return an instance of gdk_wallet"""
@classmethod
def login_with_mnemonic(cls, mnemonic):
self = cls()
self.mnemonic = mnemonic
self.session = gdk.Session({'name': 'liquid'})
self.session.login({}, self.mnemonic).resolve()
self.fetch_subaccount()
return self
"""Class method to create and return an instance of gdk_wallet"""
@classmethod
def login_with_pin(cls, pin):
self = cls()
pin_data = open(self.PIN_DATA_FILENAME).read()
self.session = gdk.Session({'name': 'liquid'})
self.session.login_with_pin(str(pin), pin_data).resolve()
self.fetch_subaccount()
return self
"""Do not use this to instantiate the object, use create_new_wallet or login_with_*"""
def __init__(self):
# 2of2_no_recovery is the account type used by Blockstream AMP.
# Do not change this value!
self.AMP_ACCOUNT_TYPE = '2of2_no_recovery'
# 'Managed Assets' is the same name as Green mobile and desktop use.
# You can change this if you like, but note that account type and
# name are used to retrieve the correct account and should be unique
# per wallet so you can retrieve the right account when you login.
self.SUBACCOUNT_NAME = 'Managed Assets'
# If you use a pin to login, the encrypted data will be saved and read
# from this file:
self.PIN_DATA_FILENAME = 'pin_data.json'
self.mnemonic = None
self.session = None
self.subaccount_pointer = None
self.gaid = None
self.last_block_height = 0
def set_pin(self, mnemonic, pin):
pin_data = gdk.set_pin(self.session.session_obj, mnemonic, str(pin), str('device_id_1'))
open(self.PIN_DATA_FILENAME, 'w').write(pin_data)
return pin_data
def get_balance(self):
return self.session.get_balance({'subaccount': self.subaccount_pointer, 'num_confs': 0}).resolve()
def get_current_2fa_status(self):
return self.session.get_twofactor_config()
def twofactor_auth_enabled(self, enabled):
# We will use email but others are available ('sms', 'phone', 'gauth').
# https://gdk.readthedocs.io/en/latest/gdk-json.html#twofactor-detail
method = 'email'
if enabled:
print('\nRequesting email authentication is enabled for this account')
email = input('\nPlease enter the email address that you will use to authenticate 2FA requests: ')
details = {'confirmed': False, 'enabled': True, 'data': email}
else:
print('\nRequesting email authentication is disabled for this account')
details = {'confirmed': True, 'enabled': False}
# The following is an example of how to handle the GDK authentication
# state machine as it progresses to completion.
self._gdk_resolve(gdk.change_settings_twofactor(self.session.session_obj, method, json.dumps(details)))
def _gdk_resolve(self, auth_handler):
# Processes and handles the state of calls that need authentication.
# The authentication process works as a state machine and may require
# input to progress. This example only uses email as an authentication
# method. If you would like to use other methods such as sms, phone,
# gauth or a hardware device see:
# https://github.com/Blockstream/green_cli/blob/842697b1c6e382487a2e00606c17d6637fe62e7b/green_cli/green.py#L75
while True:
status = gdk.auth_handler_get_status(auth_handler)
status = json.loads(status)
state = status['status']
if state == 'error':
raise RuntimeError(f'\nAn error occurred authenticating the call: {status}')
if state == 'done':
print('\nAuthentication succeeded or not required\n')
return status['result']
if state == 'request_code':
authentication_factor = 'email'
print(f'\nCode requested via {authentication_factor}.')
gdk.auth_handler_request_code(auth_handler, authentication_factor)
elif state == 'resolve_code':
resolution = input('\nPlease enter the authentication code you received: ')
gdk.auth_handler_resolve_code(auth_handler, resolution)
elif state == 'call':
gdk.auth_handler_call(auth_handler)
def fetch_subaccount(self):
subaccounts = self.session.get_subaccounts().resolve()
for subaccount in subaccounts['subaccounts']:
if self.AMP_ACCOUNT_TYPE == subaccount['type'] and self.SUBACCOUNT_NAME == subaccount['name']:
self.subaccount_pointer = subaccount['pointer']
break
if not self.subaccount_pointer:
raise Exception(f'Cannot find the sub account with name: "{self.SUBACCOUNT_NAME}" and type: "{self.AMP_ACCOUNT_TYPE}"')
self.gaid = self.session.get_subaccount(self.subaccount_pointer).resolve()['receiving_id']
# The subaccount's receiving_id is the Green Account ID (GAID)
# required for user registration with Transfer-Restricted assets.
# Notification queue always has the last block in after session login.
self.fetch_block_height()
def fetch_block_height(self):
# New blocks are added to notifications as they are found so we need to
# find the latest or, if there hasn't been one since we last checked,
# use the value set during login in the session_login method.
# The following provides an example of using GDK's notification queue.
q = self.session.notifications
while not q.empty():
notification = q.get(block=True, timeout=1)
event = notification['event']
if event == 'block':
block_height = notification['block']['block_height']
if block_height > self.last_block_height:
self.last_block_height = block_height
return self.last_block_height
def get_new_address(self):
return self.session.get_receive_address({'subaccount': self.subaccount_pointer}).resolve()
def get_wallet_transactions(self):
# Get the current block height so we can include confirmation status in
# the returned data.
chain_block_height = self.fetch_block_height()
# We'll use possible statuses of UNCONFIRMED, CONFIRMED, FINAL.
confirmed_status = None
depth_from_tip = 0
all_txs = []
index = 0
# You can override the default number (30) of transactions returned:
count = 10
while(True):
# Transactions are returned in the order of most recently seen
# transaction first. It is possible for a transaction seen less
# recently to be unconfirmed while a more recent transaction is
# confirmed.
transactions = self.session.get_transactions({'subaccount': self.subaccount_pointer, 'first': index, 'count': count}).resolve()
for transaction in transactions['transactions']:
confirmation_status = 'UNCONFIRMED'
block_height = transaction['block_height']
# Unconfirmed txs will have a block_height of 0.
if block_height > 0:
depth_from_tip = chain_block_height - block_height
# A transaction with 1 confirmation will have a depth of 0.
if depth_from_tip == 0:
confirmation_status = 'CONFIRMED'
if depth_from_tip > 0:
confirmation_status = 'FINAL'
transaction['confirmation_status'] = confirmation_status
all_txs.append(transaction)
if len(transactions['transactions']) < count:
break
index = index + 1
return all_txs
def send_to_address(self, sat_amount, asset_id, destination_address):
details = {
'subaccount': self.subaccount_pointer,
'addressees': [{'satoshi': sat_amount, 'address': destination_address, 'asset_tag': asset_id}]
}
try:
details = self._gdk_resolve(gdk.create_transaction(self.session.session_obj, json.dumps(details)))
details = self._gdk_resolve(gdk.sign_transaction(self.session.session_obj, json.dumps(details)))
details = self._gdk_resolve(gdk.send_transaction(self.session.session_obj, json.dumps(details)))
return details['txhash']
except RuntimeError as e:
print(f'\nError: {e}\n')
if __name__ == "__main__":
main()
``` |
{
"source": "2239559319/xiaochuanhttpsproxy",
"score": 3
} |
#### File: xiaochuanhttpsproxy/xiaochuanhttpsproxy/start.py
```python
import os
def start(page):
'''
:param page:抓取的页数
:return:
'''
path=os.getcwd().split('\\')
if 'xiaochuanhttpsproxy'==path[len(path)-1]:
os.system("python enter.py %d"%page)
``` |
{
"source": "223kazuki/SublimeMyTodo",
"score": 3
} |
#### File: 223kazuki/SublimeMyTodo/SimplestTodo.py
```python
import sublime, sublime_plugin
class ListTodosCommand(sublime_plugin.TextCommand):
def run(self, edit):
settings = sublime.load_settings('SimplestTodo.sublime-settings')
TODO_STR = settings.get("todo_prefix")
DONE_STR = settings.get("done_prefix")
self.keys = []
self.lines = []
allContent = sublime.Region(0, self.view.size())
regions = self.view.split_by_newlines(allContent)
for r in enumerate(regions):
line = self.view.substr(r[1])
if TODO_STR in line:
self.keys.append(line)
self.lines.append(r)
sublime.active_window().show_quick_panel(self.keys, self.goto)
def goto(self, arrpos):
if arrpos >= 0:
target_region = self.lines[arrpos][1]
target_line = self.view.substr(self.lines[arrpos][1])
self.view.sel().clear()
self.view.sel().add(target_region)
self.view.show(self.lines[arrpos][1])
sublime.active_window().run_command("toggle_todo")
sublime.active_window().run_command("list_todos")
def toggle(s):
settings = sublime.load_settings('SimplestTodo.sublime-settings')
TODO_STR = settings.get("todo_prefix")
DONE_STR = settings.get("done_prefix")
if TODO_STR in s:
return s.replace(TODO_STR, DONE_STR)
else:
return s.replace(DONE_STR, TODO_STR)
class ToggleTodoCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
line = view.line(view.sel()[0])
before = view.substr(line)
after = toggle(before)
view.replace(edit, line, after)
class NewTodoCommand(sublime_plugin.TextCommand):
def run(self, edit):
settings = sublime.load_settings('SimplestTodo.sublime-settings')
TODO_STR = settings.get("todo_prefix")
DONE_STR = settings.get("done_prefix")
view = self.view
line = view.line(view.sel()[0])
before = view.substr(line)
after = before + TODO_STR
view.replace(edit, line, after)
end = line.end() + len(TODO_STR)
r = sublime.Region(end, end);
self.view.sel().clear()
self.view.sel().add(r)
``` |
{
"source": "224alpha/Python",
"score": 4
} |
#### File: 224alpha/Python/Day_of_week.py
```python
import calendar
import datetime
def findDay(date):
born = datetime.datetime.strptime(date, '%d %m %Y').weekday()
return (calendar.day_name[born])
# Driver program
date = '03 02 2019'
print(findDay(date))
```
#### File: Python/Flappy Bird - created with tkinter/Settings.py
```python
import os
from json import dumps
from json import loads
class Settings(object):
"""
Class holding all of the game's settings
"""
# Window settings
window_name = "Flappy Bird"
window_rz = (False, False)
window_fullscreen = True
window_width = None
window_height = None
# Button settings
button_width = 22
button_height = 17
button_bg = "black"
button_fg = "white"
button_activebackground = "black"
button_font = ("Impact", 40)
button_position_y = 85
button_cursor = "hand2"
# Player scoreboard image settings
scoreboard_width = 40
scoreboard_height = 40
scoreboard_position_y = 50
# Scoreboard text settings
text_font = "Autumn"
text_fill = "White"
# Game title settings
title_width = 35
title_height = 15
title_position_y = 15
# Events
bird_event = "<Up>"
window_fullscreen_event = "<F11>"
window_start_event = "<Return>"
window_exit_event = "<Escape>"
# File paths
background_fp = "Images/background.png"
bird_fp = "Images/bird.png"
startButton_fp = "Images/start_button.png"
exitButton_fp = "Images/exit_button.png"
tube_fp = ["Images/tube.png", "Images/tube_mouth.png"]
title_fp = "Images/title.png"
scoreboard_fp = "Images/scoreboard.png"
score_fp = "Data/scr.txt"
settings_fp = "Data/settings.json"
# Animation settings
background_animation = True
# Collect all image paths in one list
images_fp = [background_fp, bird_fp, startButton_fp, exitButton_fp, tube_fp[0], tube_fp[1], title_fp]
def setOptions(self):
"""
Loads some game settings from a .json file.
If the file does not exist, one is created with the default settings.
"""
# Some attributes that may be overridden
attributes = "window_fullscreen,window_width,window_height".split(',')
# Try to open the file for reading
try:
file = open(self.settings_fp, 'r')
data = loads(file.read())
file.close()
# Set the attributes to the values read from the file, as long as they
# refer to events or appear in the list of allowed attributes.
for attr in data:
if "event" in attr or attr in attributes:
setattr(Settings, attr, data[attr])
# If no settings file exists, it will be created
except:
# Create the directory if it does not exist.
if not os.path.exists(os.path.split(self.settings_fp)[0]):
os.mkdir(os.path.split(self.settings_fp)[0])
file = open(self.settings_fp, 'w')
data = dict()
# Store in the file the attributes with their default values, as long as they
# refer to events or appear in the list of allowed attributes.
for attr in Settings.__dict__:
if "event" in attr or attr in attributes:
data[attr] = Settings.__dict__[attr]
# Write the data to the file and close it
file.write(dumps(data, indent=2))
file.close()
```
#### File: 224alpha/Python/Polyline.py
```python
import simplegui
polyline = []
def click(pos):
global polyline
polyline.append(pos)
def clear():
global polyline
polyline = []
def draw(canvas):
if len(polyline) == 1:
canvas.draw_point(polyline[0], "White")
for i in range(1, len(polyline)):
canvas.draw_line(polyline[i - 1], polyline[i], 2, "White")
frame = simplegui.create_frame("Echo click", 300, 200)
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
frame.add_button("Clear", clear)
frame.start()
```
#### File: 224alpha/Python/RandomDice.py
```python
from random import randint
from tkinter import *
def roll():
text.delete(0.0, END)
text.insert(END, str(randint(1, 100)))
window = Tk()
text = Text(window, width=3, height=1)
buttonA = Button(window, text="Press to roll!", command=roll)
text.pack()
buttonA.pack()
``` |
{
"source": "225559/algorithms",
"score": 2
} |
#### File: algorithms/hard-disc-model/event.py
```python
class Event:
def __init__(self, time, i, j):
self.time = time
self.i = i
self.j = j
if i is not None:
self.i_cc = self.i.cc
if j is not None:
self.j_cc = self.j.cc
def invalid(self):
invalid_i = self.i is not None and self.i_cc != self.i.cc
invalid_j = self.j is not None and self.j_cc != self.j.cc
if invalid_i or invalid_j:
return True
return False
def __lt__(self, other):
return self.time < other.time
```
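Because Event orders itself by time via `__lt__`, events can be kept in a binary heap, and `invalid()` lets the simulation lazily drop events whose particles have collided since the event was scheduled. A minimal sketch of that loop (real particles come from the rest of the project; the stand-in class below only needs the `cc` collision counter that Event snapshots):
```python
import heapq

class _FakeParticle:
    # stand-in exposing only the collision counter cc that Event reads
    def __init__(self):
        self.cc = 0

a, b = _FakeParticle(), _FakeParticle()
pq = []
heapq.heappush(pq, Event(0.7, a, b))
heapq.heappush(pq, Event(0.3, a, None))   # either slot may be None
heapq.heappush(pq, Event(0.5, b, None))

a.cc += 1   # pretend particle a collided; events involving a are now stale

while pq:
    event = heapq.heappop(pq)
    if event.invalid():          # snapshot cc differs from current cc, so drop lazily
        continue
    print("process event at t =", event.time)   # only the t = 0.5 event survives
```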
#### File: algorithms/hard-disc-model/window.py
```python
import tkinter as tk
class Window:
def __init__(self, unit_box_size, particles):
self.unit_box_size = unit_box_size
self.particles = particles
self.window = tk.Tk()
self.window.title("Hard Disc Model - Particle Simulation")
self.avg_ink_distance_from_center = tk.Text(self.window, height=1)
self.avg_ink_distance_from_center.pack()
self.canvas = tk.Canvas(self.window, width=unit_box_size, height=unit_box_size, bg='#060606')
self.canvas.pack()
self.grid_lines = 5
def create_circle(self, x, y, r, color):
x0 = x - r
x1 = x + r
y0 = y - r
y1 = y + r
return self.canvas.create_oval(x0, y0, x1, y1, fill=color, outline=color)
def draw_grid(self):
"""draw the grid lines in the unit box"""
size = int(self.unit_box_size)
for i in range(0, size, int(size/self.grid_lines)):
self.canvas.create_line(i, 0, i, size, width=1, fill='#363636')
self.canvas.create_line(0, i, size, i, width=1, fill='#363636')
def redraw(self):
"""redraw all particles, avg. ink position from center, and ink:water ratio"""
self.canvas.update()
self.canvas.update_idletasks()
self.canvas.delete("all") # clear
avg = 0
N = len(self.particles)
ink_counter = [[0 for x in range(self.grid_lines)] for x in range(self.grid_lines)]
water_counter = [[0 for x in range(self.grid_lines)] for x in range(self.grid_lines)]
for particle in self.particles:
x = particle.position.x * self.unit_box_size
y = particle.position.y * self.unit_box_size
r = particle.radius * self.unit_box_size
self.create_circle(x, y, r, particle.color)
if particle.kind == "ink":
avg += particle.distance_from_center()
for i in range(0, self.grid_lines):
if (particle.position.x >= (i / self.grid_lines)) and (particle.position.x < ((i + 1) / self.grid_lines)):
for j in range(0, self.grid_lines + 1):
if (particle.position.y >= (j / self.grid_lines)) and (particle.position.y < ((j + 1) / self.grid_lines)):
if particle.kind == "ink":
ink_counter[i][j] += 1
if particle.kind == "water":
water_counter[i][j] += 1
break
break
mult = self.unit_box_size / self.grid_lines
for i in range(0, self.grid_lines):
for j in range(0, self.grid_lines):
self.canvas.create_text((i * mult) + 10, (j * mult) + 10, fill="#696969", font="Times 8", text=str(ink_counter[i][j]))
self.canvas.create_text((i * mult) + 20, (j * mult) + 10, fill="#696969", font="Times 8", text=":")
self.canvas.create_text((i * mult) + 27, (j * mult) + 10, fill="#696969", font="Times 8", text=str(ink_counter[i][j] + water_counter[i][j]))
s = 'Avg. ink particle distance from center: {:f}'.format(avg/N)
self.avg_ink_distance_from_center.delete("1.0", "end")
self.avg_ink_distance_from_center.insert(tk.END, s)
self.draw_grid()
``` |
{
"source": "226wyj/You-Draw-I-Guess",
"score": 2
} |
#### File: 226wyj/You-Draw-I-Guess/main.py
```python
from predict import Predictor
from train import Trainer
from test import Tester
from models.lenet import LeNet
from models.vgg16 import Vgg16_Net
from models.resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from dataset import DataSet, DataBuilder
from util import check_path, show_model
import torch as t
import torch.nn as nn
from torch import optim
import argparse
import os
def main(args):
check_path(args)
# All 10 classes in CIFAR-10
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Dataset
data_builder = DataBuilder(args)
dataSet = DataSet(data_builder.train_builder(), data_builder.test_builder(), classes)
# Choose the model
if args.lenet:
net = LeNet()
model_name = args.name_le
elif args.vgg:
net = Vgg16_Net()
model_name = args.name_vgg
elif args.resnet18:
net = ResNet18()
model_name = args.name_res18
elif args.resnet34:
net = ResNet34()
model_name = args.name_res34
elif args.resnet50:
net = ResNet50()
model_name = args.name_res50
elif args.resnet101:
net = ResNet101()
model_name = args.name_res101
elif args.resnet152:
net = ResNet152()
model_name = args.name_res152
# Cross-entropy loss function
criterion = nn.CrossEntropyLoss()
# SGD optimizer
optimizer = optim.SGD(
net.parameters(),
lr=args.learning_rate,
momentum=args.sgd_momentum,
weight_decay=args.weight_decay
)
# Cosine-annealing learning-rate scheduler
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=150)
# Path where the model parameters are saved
model_path = os.path.join(args.model_path, model_name)
# Start training
if args.do_train:
print("Training...")
trainer = Trainer(net, criterion, optimizer, scheduler,
dataSet.train_loader, dataSet.test_loader, model_path, args)
trainer.train(epochs=args.epoch)
# t.save(net.state_dict(), model_path)
# Start evaluation. If --do_train is also given, test with the model that was just trained;
# otherwise test with the previously saved model.
if args.do_eval:
if not args.do_train and not os.path.exists(model_path):
print("Sorry, there's no saved model yet, you need to train first.")
return
# --do_eval
if not args.do_train:
checkpoint = t.load(model_path)
net.load_state_dict(checkpoint['net'])
accuracy = checkpoint['acc']
epoch = checkpoint['epoch']
print("Using saved model, accuracy : %f epoch: %d" % (accuracy, epoch))
tester = Tester(dataSet.test_loader, net, args)
tester.test()
if args.show_model:
if not os.path.exists(model_path):
print("Sorry, there's no saved model yet, you need to train first.")
return
show_model(args)
if args.do_predict:
device = t.device("cuda" if t.cuda.is_available() else "cpu")
checkpoint = t.load(model_path, map_location=device)
net.load_state_dict(checkpoint['net'])
predictor = Predictor(net, classes)
img_path = 'test'
img_name = [os.path.join(img_path, x) for x in os.listdir(img_path)]
for img in img_name:
predictor.predict(img)
# img_path = 'test/cat0.jpg'
# predictor.predict(img_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Data
parser.add_argument("--num_workers", default=0, type=int, help="Thread number for training.")
parser.add_argument("--is_download", default=True, type=bool, help="Download the datasets if there is no data.")
# Dir
parser.add_argument("--data_path", default="data", type=str, help="The directory of the CIFAR-10 data.")
parser.add_argument("--model_path", default="saved", type=str, help="The directory of the saved model.")
# File Name
parser.add_argument("--name_le", default="lenet.pth", type=str, help="The name of the saved model's parameters.")
parser.add_argument("--name_vgg", default="vgg16.pth", type=str, help="The name of the saved model's parameters.")
parser.add_argument("--name_res18", default="resnet18.pth", type=str, help="The name of the saved model's parameters.")
parser.add_argument("--name_res34", default="resnet34.pth", type=str, help="The name of the saved model's parameters.")
parser.add_argument("--name_res50", default="resnet50.pth", type=str, help="The name of the saved model's parameters.")
parser.add_argument("--name_res101", default="resnet101.pth", type=str, help="The name of the saved model's parameters.")
parser.add_argument("--name_res152", default="resnet152.pth", type=str, help="The name of the saved model's parameters.")
# Train
parser.add_argument("--batch_size", default=128, type=int, help="Batch size for training and evaluation.")
parser.add_argument("--epoch", default=10, type=int, help="The number of training epochs.")
parser.add_argument("--seed", default=42, type=int, help="The random seed used for initialization.")
# Hyper Parameters
parser.add_argument("--learning_rate", default=0.01, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=5e-4, type=int, help="Weight decay of SGD optimzer.")
parser.add_argument("--sgd_momentum", default=0.9, type=float, help="The momentum of the SGD optimizer.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="The Epsilon of Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
# Commands
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument("--do_predict", action="store_true", help="Predict single image with the saved model.")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available.")
parser.add_argument("--show_model", action="store_true", help="Display the state dict of the model.")
parser.add_argument("--lenet", action="store_true", help="Use LeNet-5 as the model.")
parser.add_argument("--vgg", action="store_true", help="Use VGG-16 as the model.")
parser.add_argument("--resnet18", action="store_true", help="Use ResNet as the model.")
parser.add_argument("--resnet34", action="store_true", help="Use ResNet as the model.")
parser.add_argument("--resnet50", action="store_true", help="Use ResNet as the model.")
parser.add_argument("--resnet101", action="store_true", help="Use ResNet as the model.")
parser.add_argument("--resnet152", action="store_true", help="Use ResNet as the model.")
args = parser.parse_args()
main(args)
```
#### File: 226wyj/You-Draw-I-Guess/predict.py
```python
import torch as t
from torch.autograd import Variable
import torchvision.transforms as transforms
from PIL import Image
class Predictor():
def __init__(self, net, classes):
self.net = net
self.classes = classes
self.device = t.device("cuda" if t.cuda.is_available() else "cpu")
self.net.to(self.device)
def predict_transform(self):
transform = transforms.Compose([
transforms.Resize(size=(32, 32)),
transforms.ToTensor(),  # convert to a Tensor
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
return transform
# Prediction for a single image
def predict(self, img_path):
transform = self.predict_transform()
img = Image.open(img_path)
img_tensor = transform(img).to(self.device)
img_tensor = Variable(t.unsqueeze(img_tensor, 0).float(), requires_grad=False)
with t.no_grad():
self.net.eval()
output = self.net(img_tensor)
_, predicted = t.max(output, 1)
print("下标: %d\t\t标签: " % predicted, self.classes[predicted])
return predicted
```
#### File: You-Draw-I-Guess/st/run.py
```python
import sys
sys.path.append('..')
sys.path.append('.')
import streamlit as st
from predict import Predictor
from models.lenet import LeNet
from models.resnet import ResNet18, ResNet34, ResNet50, ResNet101
from models.vgg16 import Vgg16_Net
import torch as t
import os
from st.contents import intro_team, intro_dataset, \
intro_method, intro_reference, show_img
def main():
# Title
st.title("Image Classification System")
st.markdown(
"""
<br><br />
This is the final project of our team.
<br><br />
We implemented a simple image classification system using deep learning method.
<br><br />
To read more about how the system works, click below.
"""
, unsafe_allow_html=True
)
more_info = st.button("More about this")
st.markdown("---")
if more_info:
presentation()
else:
classify_img()
def presentation():
back_to_system = st.button("Back", key="first")
if back_to_system:
classify_img()
# team member introduction
st.markdown("## 1. Team members")
st.markdown("There are two members in our team:")
intro_team()
st.markdown("---")
# project module introduction
st.markdown("## 2. Structure of the project")
show_img("https://raw.githubusercontent.com/226wyj/You-Draw-I-Guess/main/st/img/module.jpg?token=<KEY>")
st.markdown(
"""
<br><br />
Sample training command:
"""
, True
)
st.code(
"""
python main.py --do_train --vgg --batch_size=64 --epoch=200
"""
)
st.markdown("---")
# Technology stack introduction
st.markdown("## 3. Methods Used")
intro_method()
st.markdown("---")
# dataset introduction
st.markdown("## 4. Dataset: CIFAR-10")
intro_dataset()
show_img("https://raw.githubusercontent.com/226wyj/You-Draw-I-Guess/main/st/img/cifar-10.jpg?token=<KEY>")
st.markdown(
"""
<br><br />
Data Process:
"""
, True
)
st.code(
"""
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
"""
)
st.markdown("---")
# References
st.markdown("## 5. References")
intro_reference()
st.markdown("---")
back_to_system = st.button("Back", key="second")
if back_to_system:
classify_img()
def get_model(model):
model_path = '../saved'
if model == 'LeNet-5':
net = LeNet()
model_name = 'lenet.pth'
elif model == 'VGG-16':
net = Vgg16_Net()
model_name = 'vgg16.pth'
elif model == 'ResNet18':
net = ResNet18()
model_name = 'resnet18.pth'
elif model == 'ResNet34':
net = ResNet34()
model_name = 'resnet34.pth'
elif model == 'ResNet50':
net = ResNet50()
model_name = 'resnet50.pth'
else:
net = ResNet101()
model_name = 'resnet101.pth'
return net, os.path.join(model_path, model_name)
def classify_img():
# all classes in CIFAR-10
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
st.header("All types:")
st.subheader("Plane, Car, Bird, Cat, Deer, Dog, Frog, Hourse, Ship, Truck")
# add a side bar
side_bar = st.sidebar
side_bar.title("Select your model")
# add a selectbox to the sidebar
model = side_bar.radio(
'Model Name ',
('LeNet-5','VGG-16','ResNet18',
'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152')
)
# upload image
upload_img = st.file_uploader("Please upload your image", type="jpg")
if upload_img is not None:
st.image(upload_img)
net, model_path = get_model(model)
checkpoint = t.load(model_path, map_location=t.device('cpu'))
net.load_state_dict(checkpoint['net'])
accuracy = checkpoint['acc']
epoch = checkpoint['epoch']
st.write("Using %s , test accuracy : %f, epoch: %d" % (model, accuracy, epoch))
predictor = Predictor(net, classes)
result = predictor.predict(upload_img)
side_bar.title("Result")
side_bar.success("The picture is a %s" % classes[result])
side_bar.title("Accuracy")
side_bar.warning("test accuracy : %f" % (accuracy))
if __name__ == '__main__':
main()
``` |
{
"source": "2279209430/BVQA-2021",
"score": 2
} |
#### File: BVQA-2021/QualityAwarePretrain/Kadis700kFolder_DistortionNet.py
```python
import torch.utils.data as data
from PIL import Image
import os
import os.path
# import math
import scipy.io
import numpy as np
import random
import csv
def getFileName(path, suffix):
'''Return all file names with the given suffix in the given directory'''
filename = []
f_list = os.listdir(path)
# print f_list
for i in f_list:
# os.path.splitext(): split the file name from its extension
if os.path.splitext(i)[1] == suffix:
filename.append(i)
return filename
def getDistortionTypeFileName(path, num):
filename = []
index = 1
for i in range(0, num):
name = '%s%s%s' % ('img', str(index), '.bmp')
filename.append(os.path.join(path, name))
index = index + 1
return filename
class Kadis700kFolder_DistortionNet(data.Dataset):
def __init__(self, root, loader, index, transform=None, target_transform=None):
self.root = root
self.loader = loader
self.imgname = []
self.mos = []
self.d_type = []
self.d_level = []
self.mat_file = os.path.join(self.root, 'kadis700k.mat')
datainfo = scipy.io.loadmat(self.mat_file)
image_number = len(datainfo['ref_img_name'])
for i in range(0, image_number): # image_number
self.imgname.append(datainfo['dis_img_name'][i][0][0])
mos = float(datainfo['label'][i][0])
mos = np.array(mos)
mos = mos.astype(np.float32)
self.mos.append(mos)
d_type = float(datainfo['d_type'][i][0])
d_type = np.array(d_type)
d_type = d_type.astype(np.int64)
self.d_type.append(d_type)
d_level = float(datainfo['d_level'][i][0])
d_level = np.array(d_level)
d_level = d_level.astype(np.int64)
self.d_level.append(d_level)
sample = []
for i, item in enumerate(index):
sample.append(item)
self.samples = sample
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
ref_idx = self.samples[index]
image_name1 = os.path.join(self.root, 'dist_imgs', self.imgname[ref_idx * 5])
image_name2 = os.path.join(self.root, 'dist_imgs', self.imgname[ref_idx * 5 + 1])
image_name3 = os.path.join(self.root, 'dist_imgs', self.imgname[ref_idx * 5 + 2])
image_name4 = os.path.join(self.root, 'dist_imgs', self.imgname[ref_idx * 5 + 3])
image_name5 = os.path.join(self.root, 'dist_imgs', self.imgname[ref_idx * 5 + 4])
I1 = self.loader(image_name1)
I2 = self.loader(image_name2)
I3 = self.loader(image_name3)
I4 = self.loader(image_name4)
I5 = self.loader(image_name5)
if self.transform is not None:
I1 = self.transform(I1)
I2 = self.transform(I2)
I3 = self.transform(I3)
I4 = self.transform(I4)
I5 = self.transform(I5)
I1_D = self.d_type[ref_idx * 5] - 1
I2_D = self.d_type[ref_idx * 5 + 1] - 1
I3_D = self.d_type[ref_idx * 5 + 2] - 1
I4_D = self.d_type[ref_idx * 5 + 3] - 1
I5_D = self.d_type[ref_idx * 5 + 4] - 1
I1_DL = 6 - self.d_level[ref_idx * 5]
I2_DL = 6 - self.d_level[ref_idx * 5 + 1]
I3_DL = 6 - self.d_level[ref_idx * 5 + 2]
I4_DL = 6 - self.d_level[ref_idx * 5 + 3]
I5_DL = 6 - self.d_level[ref_idx * 5 + 4]
I1_M = self.mos[ref_idx * 5]
I2_M = self.mos[ref_idx * 5 + 1]
I3_M = self.mos[ref_idx * 5 + 2]
I4_M = self.mos[ref_idx * 5 + 3]
I5_M = self.mos[ref_idx * 5 + 4]
# sample = []
sample = {'I1': I1, 'I1_D': I1_D, 'I1_DL': I1_DL, 'I1_M': I1_M,
'I2': I2, 'I2_D': I2_D, 'I2_DL': I2_DL, 'I2_M': I2_M,
'I3': I3, 'I3_D': I3_D, 'I3_DL': I3_DL, 'I3_M': I3_M,
'I4': I4, 'I4_D': I4_D, 'I4_DL': I4_DL, 'I4_M': I4_M,
'I5': I5, 'I5_D': I5_D, 'I5_DL': I5_DL, 'I5_M': I5_M
}
return sample
def __len__(self):
length = len(self.samples)
return length
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
if __name__ == '__main__':
Kadid10kroot = '/mnt/sda2/New/kadis700k/kadis700k'
index = list(range(0, 129109))
random.shuffle(index)
train_index = index[0:round(0.8 * 129109)]
test_index = index[round(0.8 * 129109):129109]
trainset = Kadis700kFolder_DistortionNet(root=Kadid10kroot, loader=default_loader, index=train_index)
testset = Kadis700kFolder_DistortionNet(root=Kadid10kroot, loader=default_loader, index=test_index)
```
#### File: BVQA-2021/QualityAwarePretrain/QualityAware_FR_Pretrain.py
```python
import os
import torch
import torchvision
import torch.nn as nn
from SCNN import SCNN
from PIL import Image
from scipy import stats
import random
import torch.nn.functional as F
import numpy as np
import time
import scipy.io
import itertools
from torch.optim import lr_scheduler
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
def weight_init(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data,nonlinearity='relu')
# m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight.data,nonlinearity='relu')
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class DBCNN(torch.nn.Module):
def __init__(self, options):
"""Declare all needed layers."""
nn.Module.__init__(self)
# self.features1 = torchvision.models.resnet34(pretrained=True)
self.features1 = torchvision.models.resnet50(pretrained=True)
# weight_init(self.features1)
# Global pooling
self.pooling = nn.AdaptiveAvgPool2d(1)
# Linear classifier.
self.fc = torch.nn.Linear(2048, 1)
if options['fc'] == True:
# Freeze all previous layers.
for param in self.features1.parameters():
param.requires_grad = False
# Initialize the fc layers.
nn.init.kaiming_normal_(self.fc.weight.data)
if self.fc.bias is not None:
nn.init.constant_(self.fc.bias.data, val=0)
def forward(self, X):
"""Forward pass of the network.
"""
N = X.size()[0]
X1 = self.features1.conv1(X)
X1 = self.features1.bn1(X1)
X1 = self.features1.relu(X1)
X1 = self.features1.maxpool(X1)
X1 = self.features1.layer1(X1)
X1 = self.features1.layer2(X1)
X1 = self.features1.layer3(X1)
X1 = self.features1.layer4(X1)
H = X1.size()[2]
W = X1.size()[3]
assert X1.size()[1] == 2048
X1 = self.pooling(X1)
assert X1.size() == (N, 2048, 1, 1)
X1 = X1.view(N, 2048)
X = self.fc(X1)
assert X.size() == (N, 1)
return X
class DBCNNManager(object):
def __init__(self, options, path):
"""Prepare the network, criterion, solver, and data.
Args:
options, dict: Hyperparameters.
"""
print('Prepare the network and data.')
self._options = options
self._path = path
# Network.
self._net = torch.nn.DataParallel(DBCNN(self._options), device_ids=[0]).cuda()
if self._options['fc'] == False:
self._net.load_state_dict(torch.load(path['fc_root']))
print(self._net)
# Criterion.
self._criterion = torch.nn.MSELoss().cuda()
# Solver.
if self._options['fc'] == True:
self._solver = torch.optim.SGD(
self._net.module.fc.parameters(), lr=self._options['base_lr'],
momentum=0.9, weight_decay=self._options['weight_decay'])
else:
self._solver = torch.optim.Adam(
self._net.module.parameters(), lr=self._options['base_lr'],
weight_decay=self._options['weight_decay'])
if (self._options['dataset'] == 'live') | (self._options['dataset'] == 'livec'):
if self._options['dataset'] == 'live':
crop_size = 432
else:
crop_size = 448
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.RandomCrop(size=crop_size),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
# if (self._options['dataset'] == 'live'):
# if self._options['dataset'] == 'live':
# crop_size = 432
# else:
# crop_size = 448
# train_transforms = torchvision.transforms.Compose([
# torchvision.transforms.RandomHorizontalFlip(),
# torchvision.transforms.RandomCrop(size=crop_size),
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225))
# ])
# elif (self._options['dataset'] == 'livec'):
# train_transforms = torchvision.transforms.Compose([
# torchvision.transforms.RandomHorizontalFlip(),
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225))
# ])
elif (self._options['dataset'] == 'csiq') | (self._options['dataset'] == 'tid2013'):
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
elif self._options['dataset'] == 'mlive':
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.Resize((570, 960)),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
elif self._options['dataset'] == 'livecp':
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
elif self._options['dataset'] == 'koniq10k':
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
elif self._options['dataset'] == 'kadid10k':
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
elif self._options['dataset'] == 'kadis700k':
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
elif self._options['dataset'] == 'selftrain':
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.RandomCrop(size=500),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
# torchvision.transforms.Resize((384, 512)),
# torchvision.transforms.RandomHorizontalFlip(),
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225))
])
test_transforms = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
if self._options['dataset'] == 'live':
import LIVEFolder
train_data = LIVEFolder.LIVEFolder(
root=self._path['live'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
test_data = LIVEFolder.LIVEFolder(
root=self._path['live'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
elif self._options['dataset'] == 'csiq':
import CSIQFolder
train_data = CSIQFolder.CSIQFolder(
root=self._path['csiq'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
test_data = CSIQFolder.CSIQFolder(
root=self._path['csiq'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
# elif self._options['dataset'] == 'livec':
# import LIVEChallengeFolder2
# train_data = LIVEChallengeFolder2.Koniq10kFolder(
# root=self._path['koniq10k'], loader=default_loader, index=self._options['train_index'],
# transform=train_transforms)
# test_data = LIVEChallengeFolder2.LIVECompressedFolder2(
# root=self._path['livec'], loader=default_loader, index=self._options['test_index'],
# transform=test_transforms)
elif self._options['dataset'] == 'livec':
import LIVEChallengeFolder
train_data = LIVEChallengeFolder.LIVEChallengeFolder(
root=self._path['livec'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
test_data = LIVEChallengeFolder.LIVEChallengeFolder(
root=self._path['livec'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
elif self._options['dataset'] == 'livecp':
import LIVECompressedFolder
train_data = LIVECompressedFolder.LIVECompressedFolder(
root=self._path['livecp'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
test_data = LIVECompressedFolder.LIVECompressedFolder(
root=self._path['livecp'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
elif self._options['dataset'] == 'koniq10k':
import Koniq10kFolder
train_data = Koniq10kFolder.Koniq10kFolder(
root=self._path['koniq10k'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
test_data = Koniq10kFolder.Koniq10kFolder(
root=self._path['koniq10k'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
elif self._options['dataset'] == 'kadid10k':
import Kadid10kFolder
train_data = Kadid10kFolder.Kadid10kFolder(
root=self._path['kadid10k'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
test_data = Kadid10kFolder.Kadid10kFolder(
root=self._path['kadid10k'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
elif self._options['dataset'] == 'kadis700k':
import Kadis700kFolder
train_data = Kadis700kFolder.Kadis700kFolder(
root=self._path['kadis700k'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
test_data = Kadis700kFolder.Kadis700kFolder(
root=self._path['kadis700k'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
elif self._options['dataset'] == 'selftrain':
import SelfTrainFolder
# train_data = SelfTrainFolder.Kadid10kFolder(
# root=self._path['kadid10k'], loader = default_loader, index = self._options['train_index'],
# transform=train_transforms)
train_data = SelfTrainFolder.LIVEChallengeFolder(
root=self._path['livecp'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
# root = self._path['koniq10k'], loader = default_loader, index = self._options['train_index2'],
# transform = train_transforms)
# test_data = SelfTrainFolder.Kadid10kFolder(
# root=self._path['kadid10k'], loader = default_loader, index = self._options['test_index'],
# transform=test_transforms)
test_data = SelfTrainFolder.LIVECompressed2Folder(
root=self._path['livecp'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
# test_data2 = SelfTrainFolder.Koniq10kFolder(
# root=self._path['koniq10k'], loader = default_loader, index = self._options['test_index2'],
# transform=test_transforms)
else:
            raise AttributeError('Unsupported dataset: %s' % self._options['dataset'])
self._train_loader = torch.utils.data.DataLoader(
train_data, batch_size=self._options['batch_size'],
shuffle=True, num_workers=0, pin_memory=True)
self._test_loader = torch.utils.data.DataLoader(
test_data, batch_size=1,
shuffle=False, num_workers=0, pin_memory=True)
self.scheduler = lr_scheduler.StepLR(self._solver,
last_epoch=-1,
step_size=2,
gamma=0.1)
def train(self, iteration):
"""Train the network."""
print('Training.')
best_srcc = 0.0
best_plcc = 0.0
best_epoch = None
        print('Epoch\tLearning rate\tTrain loss\tTrain_SRCC\tTest_SRCC\tTest_PLCC')
for t in range(self._options['epochs']):
time_start = time.time()
epoch_loss = []
pscores = []
tscores = []
num_total = 0
for X, y in self._train_loader:
# Data.
X = torch.tensor(X.cuda())
y = torch.tensor(y.cuda())
# Clear the existing gradients.
self._solver.zero_grad()
# Forward pass.
score = self._net(X)
loss = self._criterion(score, y.view(len(score), 1).detach())
epoch_loss.append(loss.item())
# Prediction.
num_total += y.size(0)
pscores = pscores + score.cpu().tolist()
tscores = tscores + y.cpu().tolist()
# Backward pass.
loss.backward()
self._solver.step()
train_srcc, _ = stats.spearmanr(pscores, tscores)
test_srcc, test_plcc = self._consitency(self._test_loader)
time_end = time.time()
print('%d epoch done; total time = %f sec' % ((t + 1), (time_end - time_start)))
if test_srcc > best_srcc:
best_srcc = test_srcc
best_plcc = test_plcc
best_epoch = t + 1
print('*', end='')
pwd = os.getcwd()
if self._options['fc'] == True:
modelpath = os.path.join(pwd, 'fc_models', ('net_params' + '_best' + '.pkl'))
else:
modelpath = os.path.join(pwd, 'db_models', ('net_params' + '_best' + '.pkl'))
torch.save(self._net.state_dict(), modelpath)
print('%d\t%4.10f\t%4.3f\t\t%4.4f\t\t%4.4f\t%4.4f' %
(t + 1, self._solver.param_groups[0]['lr'], sum(epoch_loss) / len(epoch_loss), train_srcc, test_srcc, test_plcc))
if self._options['fc'] != True:
self.scheduler.step()
print('Best at epoch %d, test srcc %f' % (best_epoch, best_srcc))
return best_srcc, best_plcc
def _consitency(self, data_loader):
self._net.train(False)
num_total = 0
pscores = []
tscores = []
for X, y in data_loader:
# Data.
X = torch.tensor(X.cuda())
y = torch.tensor(y.cuda())
# Prediction.
score = self._net(X)
pscores = pscores + score[0].cpu().tolist()
tscores = tscores + y.cpu().tolist()
num_total += y.size(0)
test_srcc, _ = stats.spearmanr(pscores, tscores)
tscores = torch.Tensor(tscores).reshape(-1).tolist() # live compressed
test_plcc, _ = stats.pearsonr(pscores, tscores)
self._net.train(True) # Set the model to training phase
return test_srcc, test_plcc
def main():
"""The main function."""
import argparse
parser = argparse.ArgumentParser(
description='Train DB-CNN for BIQA.')
parser.add_argument('--base_lr', dest='base_lr', type=float, default=1e-5,
help='Base learning rate for training.')
parser.add_argument('--batch_size', dest='batch_size', type=int,
default=32, help='Batch size.')
parser.add_argument('--epochs', dest='epochs', type=int,
default=30, help='Epochs for training.')
parser.add_argument('--weight_decay', dest='weight_decay', type=float,
default=5e-4, help='Weight decay.')
parser.add_argument('--dataset', dest='dataset', type=str, default='kadis700k',
help='dataset: live|csiq|tid2013|livec|mlive|livecp|koniq10k|kadid10k|selftrain|kadis700k')
args = parser.parse_args()
    if args.base_lr <= 0:
        raise AttributeError('--base_lr parameter must be > 0.')
    if args.batch_size <= 0:
        raise AttributeError('--batch_size parameter must be > 0.')
    if args.epochs < 0:
        raise AttributeError('--epochs parameter must be >= 0.')
    if args.weight_decay <= 0:
        raise AttributeError('--weight_decay parameter must be > 0.')
options = {
'base_lr': args.base_lr,
'batch_size': args.batch_size,
'epochs': args.epochs,
'weight_decay': args.weight_decay,
'dataset': args.dataset,
'fc': [],
'train_index': [],
'test_index': []
}
path = {
        'live': os.path.join('dataset', 'F:\\dataset\\databaserelease2'),
        'csiq': os.path.join('dataset', 'S:\\dataset\\CSIQ'),
'tid2013': os.path.join('dataset', 'TID2013'),
# 'livec': os.path.join('dataset', 'F:\\dataset\\ChallengeDB_release\\Images'),
'livec': os.path.join('dataset', 'F:\\dataset\\ChallengeDB_release'),
'mlive': os.path.join('dataset', 'LIVEmultidistortiondatabase'),
'livecp': os.path.join('dataset', 'F:\\dataset\\LIVECompressed2'), # F:\\dataset\\LIVECompressed2\\level63
'koniq10k': os.path.join('dataset', 'F:\\dataset\\koniq_10k\\author\\koniq10k_512x384'), # 'F:\\dataset\\LIVECompressed2\\Koniq10k_JPEG_distorted_images'),
'kadid10k': os.path.join('dataset', 'F:\\dataset\\KADID-10k\\kadid10k'),
'selftrain': os.path.join('dataset', 'F:\\dataset\\KADID-10k\\kadid10k'),
'kadis700k': os.path.join('dataset', '/mnt/sda2/New/kadis700k/kadis700k'),
'fc_model': os.path.join('fc_models'),
'scnn_root': os.path.join('pretrained_scnn', 'net_params18.pkl'),
'fc_root': os.path.join('fc_models', 'net_params_best.pkl'),
'db_model': os.path.join('db_models'),
'db_root': os.path.join('db_models', 'net_params_best.pkl')
}
if options['dataset'] == 'live':
index = list(range(0, 29))
elif options['dataset'] == 'csiq':
index = list(range(0, 30))
elif options['dataset'] == 'tid2013':
index = list(range(0, 25))
elif options['dataset'] == 'mlive':
index = list(range(0, 15))
# elif options['dataset'] == 'livec':
# index = list(range(0, 10073))
# index_test = list(range(0, 80))
elif options['dataset'] == 'livec':
index = list(range(0, 1162))
elif options['dataset'] == 'livecp':
index = list(range(0, 80))
elif options['dataset'] == 'koniq10k':
index = list(range(0, 10073))
elif options['dataset'] == 'kadid10k':
index = list(range(0, 10125))
elif options['dataset'] == 'kadis700k':
index = list(range(0, 129109)) # 129109
elif options['dataset'] == 'selftrain':
index = list(range(0, 1082))
# index2 = list(range(0, 10073))
index_test = list(range(0, 80))
lr_backup = options['base_lr']
iter_num = 1
    srcc_all = np.zeros((1, iter_num + 1), dtype=float)
    plcc_all = np.zeros((1, iter_num + 1), dtype=float)
for i in range(0, iter_num):
best_srcc = 0.0
best_plcc = 0.0
# randomly split train-test set
random.shuffle(index)
train_index = index[0:round(0.8 * len(index))]
test_index = index[round(0.8 * len(index)):len(index)]
options['train_index'] = train_index
options['test_index'] = test_index
pwd = os.getcwd()
# train the fully connected layer only
options['fc'] = True
options['base_lr'] = 1e-3
manager = DBCNNManager(options, path)
best_srcc, best_plcc = manager.train(i)
# fine-tune all model
options['fc'] = False
options['base_lr'] = lr_backup
manager = DBCNNManager(options, path)
best_srcc, best_plcc = manager.train(i)
result_path = os.path.join(pwd, 'result', ('db_result_' + str(i + 1) + '.mat'))
scipy.io.savemat(result_path, mdict={'best_srcc': best_srcc, 'best_plcc': best_plcc})
srcc_all[0][i] = best_srcc
plcc_all[0][i] = best_plcc
srcc_mean = np.mean(srcc_all[0][0:iter_num])
print('average srcc:%4.4f' % (srcc_mean))
plcc_mean = np.mean(plcc_all[0][0:iter_num])
print('average plcc:%4.4f' % (plcc_mean))
srcc_all[0][iter_num] = srcc_mean
plcc_all[0][iter_num] = plcc_mean
print(srcc_all)
print(plcc_all)
final_result_path = os.path.join(pwd, 'result', ('final_result' + '.mat'))
scipy.io.savemat(final_result_path, mdict={'srcc_all': srcc_all, 'plcc_all': plcc_all})
return best_srcc
if __name__ == '__main__':
main()
```
#### File: 2279209430/BVQA-2021/result_analysis.py
```python
import numpy as np
from scipy.optimize import curve_fit
from scipy import stats
from argparse import ArgumentParser
# define 4-parameter logistic regression
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
logisticPart = 1 + np.exp(np.negative(np.divide(X - bayta3, np.abs(bayta4))))
yhat = bayta2 + np.divide(bayta1 - bayta2, logisticPart)
return yhat
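# Rough behaviour of the 4-parameter logistic above (illustrative numbers only):
# with bayta1=100, bayta2=0, bayta3=50 and bayta4=10, a raw score equal to bayta3
# maps to the midpoint (100 + 0) / 2 = 50, while scores far above/below bayta3
# saturate towards bayta1/bayta2 respectively.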
if __name__ == "__main__":
parser = ArgumentParser(description='Result Analysis for Quality Assessment of In-the-Wild Videos')
parser.add_argument('--lr', type=float, default=5e-4,
help='learning rate (default: 5e-4)')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=40,
help='number of epochs to train (default: 40)')
parser.add_argument('--model', default='QPMP', type=str,
help='model name (default: QPMP)')
parser.add_argument('--loss', default='plcc+srcc', type=str,
help='loss type (default: plcc+srcc)')
parser.add_argument('--feature_extractor', default='SpatialMotion', type=str,
                        help='feature extractor backbone (default: SpatialMotion)')
parser.add_argument('--trained_datasets', nargs='+', type=str, default=['C'],
help="trained datasets (default: ['C', 'K', 'L', 'N', 'Y', 'Q'])")
parser.add_argument('--tested_datasets', nargs='+', type=str, default=['C'],
help="tested datasets (default: ['C', 'K', 'L', 'N', 'Y', 'Q'])")
parser.add_argument('--train_proportion', type=float, default=1.0,
                        help='proportion of the training set actually used for training (default: 1.0)')
parser.add_argument('--start_exp_id', default=0, type=int,
                        help='start exp id for train-val-test splits (default: 0)')
parser.add_argument('--num_iters', type=int, default=10,
help='the number of train-val-test iterations (default: 10)')
args = parser.parse_args()
for analysis_dataset in args.tested_datasets:
all_results = []
for i in range(args.start_exp_id, args.num_iters):
save_result_file = 'results/{}-{}-{}-{}-{}-{}-{}-{}-{}-EXP{}.npy'.format(args.model, args.feature_extractor, args.loss,
args.train_proportion, args.trained_datasets, args.tested_datasets, args.lr, args.batch_size, args.epochs, i)
test = np.load(save_result_file, allow_pickle=True)
result = test.tolist()
iter_mos = result[analysis_dataset]['sq']
iter_pscore = result[analysis_dataset]['mq']
# logistic regression
beta = [np.max(iter_mos), np.min(iter_mos), np.mean(iter_pscore), 0.5]
popt, _ = curve_fit(logistic_func, iter_pscore, iter_mos, p0=beta, maxfev=100000000)
iter_pscore_logistic = logistic_func(iter_pscore, *popt)
SRCC = stats.spearmanr(iter_mos, iter_pscore)[0]
PLCC = stats.pearsonr(iter_mos, iter_pscore_logistic)[0]
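            # SRCC is rank-based and invariant to monotonic mappings, so it is computed
            # on the raw predictions; PLCC measures linear correlation, so it is computed
            # after the logistic mapping aligns the prediction scale with the MOS scale.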
result[analysis_dataset]['SROCC'] = SRCC
result[analysis_dataset]['PLCC'] = PLCC
all_results.append(result[analysis_dataset])
srcc = []
plcc = []
for i in range(args.start_exp_id, args.num_iters):
srcc.append(all_results[i]['SROCC'])
plcc.append(all_results[i]['PLCC'])
print('Performance on {}:'.format(analysis_dataset))
print('median_srcc: {}, median_plcc: {}'.format(np.median(srcc), np.median(plcc)))
print('mean_srcc: {}, mean_plcc: {}'.format(np.mean(srcc), np.mean(plcc)))
``` |
{
"source": "22ema/Real_Time_Mask_Detection",
"score": 3
} |
#### File: Real_Time_Mask_Detection/Video_inference/video_inference.py
```python
import cv2
from inference.inference import *
import timeit
def video_inference(video_file, weigths, cfg):
'''
    Run inference on every frame of a video and display the results.
    :param video_file: path to the video file
    :param weigths: path to the model weights file
    :param cfg: path to the model configuration file
'''
cap = cv2.VideoCapture(video_file)
model = setting_model(cfg, weigths)
if cap.isOpened():
while True:
            ret, img = cap.read()  # read the next frame from the video
            if not ret:  # stop when the video ends or a frame cannot be read
                break
start_t = timeit.default_timer()
output, trans_image = inference(img, model) # inference frame
result_img = post_processing(output, img, trans_image) # visualize bounding box and class in image
terminate_t = timeit.default_timer()
cv2.imshow('result', result_img) # display result image
FPS = int(1./(terminate_t - start_t))
print("FPS:", FPS)
            if cv2.waitKey(1) & 0xFF == ord('q'):  # press the 'q' key to quit
break
else:
print("can't open video.")
cap.release()
cv2.destroyAllWindows()
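# Minimal usage sketch (the file paths below are placeholders, not part of this repo):
# if __name__ == '__main__':
#     video_inference('sample_video.mp4', 'mask_detector.weights', 'mask_detector.cfg')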
``` |
{
"source": "22hh8/22hh8.github.io-dorm-norms",
"score": 4
} |
#### File: 22hh8/22hh8.github.io-dorm-norms/lab7.py
```python
from PIL import Image
from filter import *
class TargetFilter(PlainFilter):
""" This will only keep pixels that are close to a target color while
the rest of the image turns white."""
__slots__ = ['target']
def __init__(self, base,targetC=Color((.2,.3,.8))): # setting target color
"""Initialize filter based on an image or other filter."""
# call the PlainFilter initializer
self.target = targetC # creating a target slot
super().__init__(base)
def after(self,x,y):
"""Pixel color after filtering."""
(r,g,b) = self.before(x,y).rgb()
(r1,g1,b1) = self.target.rgb()
distance=((r1-r)**2+(g1-g)**2+(b1-b)**2)**.5 # set distance eq
if distance<=.5: #if within distance
return Color( (r,g,b) )
else:
return Color( (1,1,1) ) #turn white
class RedFilter(PlainFilter):
""" Makes Van Gogh's Bedroom painting more red. """
def __init__(self, base):
"""Initialize filter based on an image or other filter."""
# call the PlainFilter initializer
super().__init__(base)
def after(self,x,y):
"""Pixel color after filtering."""
(r,g,b) = self.before(x,y).rgb()
if r > 0 and b > .25: # setting a min to blue helps to make it purple
r += .2
c = Color( ((r+1)/2,g,b) )
return c
class LighterFilter(PlainFilter):
""" This makes an image lighter overall. """
def __init__(self, base):
"""Initialize filter based on an image or other filter."""
# call the PlainFilter initializer
super().__init__(base)
def after(self,x,y):
"""Pixel color becomes whiter after filtering."""
(r,g,b) = self.before(x,y).rgb()
r += 1 # mix with white rgb
g += 1
b += 1
c = Color( (r/2,g/2,b/2) )
return c
class RotateFilter(PlainFilter):
""" Make red green and green blue and blue red """
def __init__(self, base):
"""Initialize filter based on an image or other filter."""
# call the PlainFilter initializer
super().__init__(base)
def after(self,x,y):
"""The new Pixel color after filtering."""
(h,s,v) = self.before(x,y).hsv()
if x >= self.width/2:
h = (h+120)%360
c = Color( (h,s,v), model='hsv' )
return c
class DarkerFilter(PlainFilter):
""" This makes an image darker overall."""
def __init__(self, base):
"""Initialize filter based on an image or other filter."""
# call the PlainFilter initializer
super().__init__(base)
def after(self,x,y):
"""Pixel color after filtering."""
(r,g,b) = self.before(x,y).rgb()
c = Color( (r/2,g/2,b/2) ) # make it darker, closer to 0
return c
if __name__ == '__main__':
# In this section of code, you should demonstrate how your
# filters work.
bedroom = Image.open('images/Bedroom.png')
result = DarkerFilter(bedroom)
result.image().save('DarkerBedroom.png')
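    # The other filters above can be exercised the same way; the output file names
    # here are only examples:
    # LighterFilter(bedroom).image().save('LighterBedroom.png')
    # RedFilter(bedroom).image().save('RedBedroom.png')
    # RotateFilter(bedroom).image().save('RotateBedroom.png')
    # TargetFilter(bedroom, targetC=Color((.2, .3, .8))).image().save('TargetBedroom.png')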
``` |
{
"source": "22malvina/consumption",
"score": 2
} |
#### File: consumption/app_layer/tests.py
```python
from django.test import TestCase
from prediction.models import Element, Prediction, PredictionLinear, PredictionLinearFunction
from product_card.models import ProductCard
from offer.models import Offer, ChequeOffer
from company.models import Company, Employee
from cheque.models import FNSCheque, FNSChequeElement, QRCodeReader, ImportProverkachekaComFormatLikeFNS, ShowcasesCategory
from datetime import datetime
from telegram.models import Telegram
#from prediction.tests.Base import list_buy_milk
from prediction.tests import Base as BasePrediction
#print Base.list_buy_milk()
import json
import time
import urllib
import urllib2, base64
from loader.cheque.proverkacheka_com import generate_cheque_qr_codes
class Base(TestCase):
def setUp(self):
self.maxDiff = None
def test_1(self):
"""
        A user is using the service for the first time.
        Create a user from the client data.
        Create a company for the user.
"""
pety_employee = Employee(title='Pety', key='123zxc')
pety_employee.save()
company_family = Company(title='family')
company_family.save()
company_family.employees.add(pety_employee)
company_family.title = 'test'
self.assertEqual('test', company_family.title)
company_family = Company.objects.get(employees__in=[pety_employee])
self.assertEqual('family', company_family.title)
def test_2(self):
"""
        The user wants to upload info for a receipt.
        They must log in
        and obtain a session,
        then send the text from the QR code;
        the default company is used for saving.
        Finally, fetch the saved receipt from the database by its parameters.
"""
        # TODO: initialize things this way for now
self.test_1()
#in
key = '123zxc'
qr_text = 't=20201107T2058&s=63.00&fn=9288000100192401&i=439&fp=2880362760&n=1'
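        # The QR string follows the standard Russian FNS receipt format:
        # t = purchase date/time, s = total sum, fn = fiscal drive number,
        # i = fiscal document number, fp = fiscal sign, n = operation type.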
self.assertEqual(0, FNSCheque.objects.all().count())
#calc
pety_employee = Employee.objects.get(key=key)
self.assertEqual('Pety', pety_employee.title)
company_family = Company.objects.get(employees__in=[pety_employee])
FNSCheque.import_from_proverkacheka_com_format_like_fns(qr_text, company_family)
self.assertEqual(1, FNSCheque.objects.all().count())
cheque_p = QRCodeReader.qr_text_to_params(qr_text)
cheque = FNSCheque.objects.get(fns_fiscalDocumentNumber=cheque_p['FD'], fns_fiscalDriveNumber=cheque_p['FN'], fns_fiscalSign=cheque_p['FDP'])
cheque.company = Company.objects.get(employees__in=[pety_employee])
cheque.save()
FNSCheque.import_from_proverkacheka_com_format_like_fns(qr_text, company_family)
FNSCheque.import_from_proverkacheka_com_format_like_fns('t=11&s=22&fn=33&i=44&fp=55&n=1', company_family)
self.assertEqual(1, FNSCheque.objects.all().count())
def test_3(self):
"""
        Petya wants to upload a few more receipts:
        upload all available receipts,
        then pull up and see what the auto-generated Milk consumption function tells him.
"""
pass
def test_4(self):
"""
        Import from https://proverkacheka.com/check&p=2:
        get all receipts;
        get the last receipt uploaded by Robo1;
        load pages from https://proverkacheka.com/check&p=2 one after another until the last receipt is found on a page:
        load a page,
        extract the receipts from it,
        convert the receipts into the comparison format used for QR-code text
        (there are only 5 parameters),
        try to find the receipt we are looking for,
        if it is not found load the next page; limit this to 100 pages, but the first run is capped at 7000;
        save them to the repository on behalf of Robo1;
        get the list of QR codes and feed it to the standard mechanism for saving users' receipts;
        later they could be used to show similar products in other shops and to recommend prices.
"""
robo1_employee = Employee(title='robo1', key='<KEY>')
robo1_employee.save()
company_family = Company(title='family')
company_family.save()
company_family.employees.add(robo1_employee)
last_fns_cheques = FNSCheque.objects.filter(company=company_family).order_by('-id')
has_last_fns_cheques = False
if last_fns_cheques:
has_last_fns_cheques = True
last_fns_cheque = last_fns_cheques[0]
        # TODO: keep the test broken this way as a reminder that it needs fixing
# cheque_params = generate_cheque_qr_codes(has_last_fns_cheques, last_fns_cheques)
#
# self.assertEqual(0, FNSCheque.objects.all().count())
# self.assertEqual(0, FNSChequeElement.objects.all().count())
# for s in cheque_params:
# print s
        # # TODO: the company the receipts are saved for needs to be passed in
# FNSCheque.import_from_proverkacheka_com_format_like_fns(s, company_family)
# #print 'sleep'
# #time.sleep(10)
# self.assertEqual(50, FNSCheque.objects.all().count())
# self.assertTrue(99 < FNSChequeElement.objects.all().count())
def test_5(self):
"""
        Visit the resource and see which showcases have offers at all.
        1. products can be searched with a text string
        2. a tree of product categories
"""
company_family = Company(title='family')
company_family.save()
for i in BasePrediction.list_buy_milk():
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'], json={'data': {'json': i}})
fns_cheque.save()
for j in i['items']:
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'], price=j['price'])
e.save()
for i in BasePrediction.list_buy_cheese():
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'], json={'data': {'json': i}})
fns_cheque.save()
for j in i['items']:
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'], price=j['price'])
e.save()
        # create offers based on the existing entities
text = u'СЫР'
offers = ChequeOffer.find(text)
self.assertEqual([
{u'datetime': {u'update': u'2020-05-28T22:51:00'},
u'price': {u'one': 13500, u'per_one_gram': 61363.64},
u'product': {u'title': u'4607004891694 \u0421\u042b\u0420 \u0421\u041b\u0418\u0412\u041e\u0427\u041d\u042b\u0419 HOCHLA'},
u'showcase': {u'address': u'107076, \u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'datetime': {u'update': u'2020-05-28T22:51:00'},
u'price': {u'one': 13990, u'per_one_gram': 55960.0},
u'product': {u'title': u'8600742011658 \u0421\u042b\u0420 \u0421\u0415\u0420\u0411\u0421\u041a\u0410\u042f \u0411\u0420\u042b\u041d\u0417\u0410'},
u'showcase': {u'address': u'107076, \u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'datetime': {u'update': u'2020-05-15T20:45:00'},
u'price': {u'one': 49990, u'per_one_gram': 49990.0},
u'product': {u'title': u'2364939000004 \u0421\u042b\u0420 \u041a\u041e\u0420\u041e\u041b\u0415\u0412\u0421\u041a\u0418\u0419 51%'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'datetime': {u'update': u'2020-05-10T21:08:00'},
u'price': {u'one': 59990, u'per_one_gram': 59990.0},
u'product': {u'title': u'2316971000009 \u0421\u042b\u0420 \u041c\u0410\u0410\u0421\u0414\u0410\u041c 45% \u0418\u0427\u0410\u041b'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'datetime': {u'update': u'2020-04-21T14:04:00'},
u'price': {u'one': 50306, u'per_one_gram': 50306.0},
u'product': {u'title': u'2372240000002 \u0421\u042b\u0420 \u0413\u0420\u0410\u041d\u0414 SPAR 45%'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'datetime': {u'update': u'2020-04-21T14:04:00'},
u'price': {u'one': 37670, u'per_one_gram': 37670.0},
u'product': {u'title': u'2364178000001 \u0421\u042b\u0420 \u041c\u0410\u0410\u0421\u0414\u0410\u041c 45% \u041f\u0420\u0415'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'datetime': {u'update': u'2020-05-23T21:58:00'},
u'price': {u'one': 49900, u'per_one_gram': 49900.0},
u'product': {u'title': u'\u0421\u042b\u0420 \u0420\u041e\u0421\u0421\u0418\u0419\u0421\u041a\u0418\u0419 45%'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-05-23T21:58:00'},
u'price': {u'one': 18900, u'per_one_gram': 75600.0},
u'product': {u'title': u'\u0421\u042b\u0420 \u041c\u0410\u0421\u041a\u0410\u0420.80% 250\u0413\u0420'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-05-23T21:58:00'},
u'price': {u'one': 3399, u'per_one_gram': 37766.67},
u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0421 \u041b\u0423\u041a\u041e\u041c 90\u0413'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-05-23T21:58:00'},
u'price': {u'one': 3399, u'per_one_gram': 37766.67},
u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0412\u041e\u041b\u041d\u0410 45% 90\u0413'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-05-02T20:56:00'},
u'price': {u'one': 3899, u'per_one_gram': 48737.5},
u'product': {u'title': u'\u041f\u041b.\u0421\u042b\u0420 \u042f\u041d\u0422\u0410\u0420\u042c \u0424\u041e\u041b80\u0413'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-05-02T20:56:00'},
u'price': {u'one': 15900, u'per_one_gram': 39750.0},
u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0424\u0415\u0422\u0410\u041a\u0421\u0410 400\u0413'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-05-02T20:56:00'},
u'price': {u'one': 3399, u'per_one_gram': 37766.67},
u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0412\u041e\u041b\u041d\u0410 45% 90\u0413'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-05-02T20:56:00'},
u'price': {u'one': 3399, u'per_one_gram': 37766.67},
u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0412\u041e\u041b\u041d\u0410 45% 90\u0413'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-05-02T20:56:00'},
u'price': {u'one': 3899, u'per_one_gram': 48737.5},
u'product': {u'title': u'\u041f\u041b.\u0421\u042b\u0420 \u042f\u041d\u0422\u0410\u0420\u042c \u0424\u041e\u041b80\u0413'},
u'showcase': {u'address': u''}},
{u'datetime': {u'update': u'2020-06-03T14:50:00'},
u'price': {u'one': 59899, u'per_one_gram': 59899.0},
u'product': {u'title': u'2364178000001 \u0421\u042b\u0420 \u041c\u0410\u0410\u0421\u0414\u0410\u041c 45% \u041f\u0420\u0415'},
u'showcase': {u'address': u'107076, \u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'datetime': {u'update': u'2020-06-03T14:50:00'},
u'price': {u'one': 20000, u'per_one_gram': 50000.0},
u'product': {u'title': u'4607004892677 \u0421\u042b\u0420 HOCHLAND \u041c\u042f\u0413\u041a\u0418\u0419'},
u'showcase': {u'address': u'107076, \u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}}
], offers)
def test_6(self):
"""
        Get the most recent offer by date, plus the minimum and the maximum over the last week/month/quarter/year/all time, 201213 1212, for an address.
"""
company_family = Company(title='family')
company_family.save()
for i in BasePrediction.list_buy_milk():
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'], json={'data': {'json': i}})
fns_cheque.save()
for j in i['items']:
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'], price=j['price'])
e.save()
for i in BasePrediction.list_buy_cheese():
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'], json={'data': {'json': i}})
fns_cheque.save()
for j in i['items']:
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'], price=j['price'])
e.save()
        # create offers based on the existing entities
text = u'МОЛОКО'
#text = u'СЫР'
offers = ChequeOffer.find(text)
print "==================================="
#self.assertEqual([], offers)
        # 2) group them somehow - probably within a single showcase (INN + retail address); even if the offer changes (the name in the receipt changed - this happens very rarely and does not really matter, in fact only the latest one matters), i.e. if the names within one shop differ, they are different products
#{u'datetime': {u'update': u'2020-06-03t14:50:00'},
#u'price': {u'one': 20000, u'per_one_gram': 50000.0},
#u'product': {u'title': u'4607004892677 \u0421\u042b\u0420 hochland \u041c\u042f\u0413\u041a\u0418\u0419'},
#u'showcase': {u'address': '107076, \xd0\xb3.\xd0\x9c\xd0\xbe\xd1\x81\xd0\xba\xd0\xb2\xd0\xb0, \xd1\x83\xd0\xbb.\xd0\x91\xd0\xbe\xd0\xb3\xd0\xbe\xd1\x80\xd0\xbe\xd0\xb4\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9 \xd0\x92\xd0\xb0\xd0\xbb, \xd0\xb4.6, \xd0\xba\xd0\xbe\xd1\x80\xd0\xbf.2'}}
offer_analytics = ChequeOffer.analytics_last_min_max_price(offers)
self.assertEqual([
{u'last_datetime': u'2020-06-03T14:50:00',
u'count': 2,
u'price_analytics': {u'last': {u'one': 5990, u'per_one_gram': 5990.0},
u'max': {u'one': 6990, u'per_one_gram': 6990.0},
u'min': {u'one': 5990, u'per_one_gram': 5990.0}},
u'product': {u'title': u'4607045982771 \u041c\u041e\u041b\u041e\u041a\u041e SPAR \u0423\u041b\u042c\u0422\u0420\u0410\u041f\u0410'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'last_datetime': u'2020-05-24T12:56:00',
u'count': 1,
u'price_analytics': {u'last': {u'one': 4990, u'per_one_gram': 4990.0},
u'max': {u'one': 4990, u'per_one_gram': 4990.0},
u'min': {u'one': 4990, u'per_one_gram': 4990.0}},
u'product': {u'title': u'4607045982788 \u041c\u041e\u041b\u041e\u041a\u041e SPAR \u0423\u041b\u042c\u0422\u0420\u0410\u041f\u0410'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{u'last_datetime': u'2020-05-15T20:45:00',
u'count': 1,
u'price_analytics': {u'last': {u'one': 5490, u'per_one_gram': 5935.14},
u'max': {u'one': 5490, u'per_one_gram': 5935.14},
u'min': {u'one': 5490, u'per_one_gram': 5935.14}},
u'product': {u'title': u'4607167840416 \u041c\u041e\u041b\u041e\u041a\u041e SPAR 3,2% 925'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{
u'count': 2,
u'last_datetime': u'2020-05-10T21:08:00',
u'price_analytics': {u'last': {u'one': 8990, u'per_one_gram': 5288.24},
u'max': {u'one': 8990, u'per_one_gram': 5288.24},
u'min': {u'one': 8990, u'per_one_gram': 5288.24}},
u'product': {u'title': u'4607167841154 \u041c\u041e\u041b\u041e\u041a\u041e SPAR 2,5% 1,7'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{
u'count': 1,
u'last_datetime': u'2020-05-06T21:53:00',
u'price_analytics': {u'last': {u'one': 5990, u'per_one_gram': 5990.0},
u'max': {u'one': 5990, u'per_one_gram': 5990.0},
u'min': {u'one': 5990, u'per_one_gram': 5990.0}},
u'product': {u'title': u'4690228007842 \u041c\u041e\u041b\u041e\u041a\u041e \u0414\u041e\u041c\u0418\u041a \u0412 \u0414\u0415\u0420\u0415\u0412'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
{
u'count': 1,
u'last_datetime': u'2020-05-23T21:58:00',
u'price_analytics': {
u'last': {
u'one': 7990, u'per_one_gram': 5707.14
},
u'max': {u'one': 7990, u'per_one_gram': 5707.14},
u'min': {u'one': 7990, u'per_one_gram': 5707.14}},
u'product': {u'title': u'\u041c\u041e\u041b\u041e\u041a\u041e \u041f\u0410\u0421\u0422.3,7%1400'},
u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
], offer_analytics)
def test_7(self):
"""
        1. create a receipt from the string obtained from a QR code
        2. update the receipt data from proverkacheka.com
"""
company_family = Company(title='family')
company_family.save()
qr_text = 't=20200524T125600&s=849.33&fn=9285000100127361&i=115180&fp=1513716805&n=1'
fns_cheque = FNSCheque.create_fns_cheque_from_qr_text(qr_text, company_family)
fns_cheque.save()
self.assertEqual('', fns_cheque.fns_userInn)
self.assertEqual('', FNSCheque.objects.get(fns_fiscalDocumentNumber='115180').fns_userInn)
fns_cheque_json = ImportProverkachekaComFormatLikeFNS.get_fns_cheque_by_qr_params('', qr_text)
self.assertTrue(FNSCheque.has_cheque_with_fiscal_params(company_family,
fns_cheque_json["document"]["receipt"]["fiscalDocumentNumber"],
fns_cheque_json["document"]["receipt"]["fiscalDriveNumber"],
fns_cheque_json["document"]["receipt"]["fiscalSign"],
fns_cheque_json["document"]["receipt"]["dateTime"],
fns_cheque_json["document"]["receipt"].get("totalSum", 'Error')))
self.assertTrue(FNSCheque.has_cheque_with_fns_cheque_json(company_family, fns_cheque_json))
#FNSCheque.update_cheque_from_json(fns_cheque, fns_cheque_json)
fns_cheque.update_cheque_from_json(fns_cheque_json)
self.assertEqual(u'5258056945', fns_cheque.fns_userInn)
self.assertEqual(u'5258056945', FNSCheque.objects.get(fns_fiscalDocumentNumber='115180').fns_userInn)
def test_8(self):
"""
        Simulate a new user entering commands.
/start
t=20200524T125600&s=849.33&fn=9285000100127361&i=115180&fp=1513716805&n=1
t=20200506T215300&s=1351.88&fn=9285000100127255&i=83300&fp=328049751&n=1
t=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1
/cheques
        What comes from the user?
        A Telegram message that contains a request to the bot,
        which may expect no reply,
        may expect an immediate reply,
        may expect a delayed reply,
        or may expect several delayed replies.
"""
company_family = Company(title='family')
company_family.save()
#full_message = {}
#chat_id = full_message['message']['chat']['id']
#message = full_message['message']['text']
#telegram_user_id = message['message']['from']['id']
#first_name = message['message']['from'].get('first_name', '')
#last_name = message['message']['from'].get('last_name', '')
#language_code = message['message']['from'].get('language_code', '')
qr_tests = [
't=20200524T125600&s=849.33&fn=9285000100127361&i=115180&fp=1513716805&n=1',
't=20200506T215300&s=1351.88&fn=9285000100127255&i=83300&fp=328049751&n=1',
't=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1',
'test t=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1 test',
u'Привет test t=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1 test',
u"""
message.find('i') >= 0 t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sf s выаавы ы ва ывage.find('t') >= 0 and \
message.find('s') >= 0 and \
message.find('fn') >= 0 and \
message.find('fp') >= 0 and \
message.find('i') >= 0 t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sf s выаавы ы ва ывage.find('t') >= 0 and \
message.find('s') >= 0 and \
message.find('fn') >= 0 and \
message.find('fp') >= 0 and \
message.find('i') >= 0 t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sf s выаавы ы ва ыв
""",
'print "====", message.en',
'dsf ds fvt=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sdf ds',
u'spent; 101.01; буреговы; мак авто',
u'spend; 60; кофе; ван бакс тверская 7; 2020.12.19 18:46',
u'spend; 60; кофе; ван бакс; 2020.12.15',
]
chat_id = 'Test chat id = 100500'
chat_id = '383332826'
for qr_test in qr_tests:
message = qr_test
print '====', message.encode('utf8')
Telegram.process_message(company_family, chat_id, message)
Telegram.process_message(company_family, chat_id, '/list')
Telegram.process_message(company_family, chat_id, '/cheque 2')
Telegram.process_message(company_family, chat_id, '/cheque_1')
Telegram.process_message(company_family, chat_id, '/cheque3')
def test_9(self):
"""
        Try to fetch messages from Telegram.
"""
Telegram.process_last_messages_from_bot()
Telegram.process_last_messages_from_bot()
def test_10(self):
"""
        Extract the 5 required parameters (6 counting date and time) from the request.
"""
#t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1
s2 = [
'20201216T1818 29.00 9280440301295284 236 3107860384',
#'2020.12.16 18:18:01 29.00 9280440301295284 236 3107860384',
'2020.12.16 18:18 29.00 9280440301295284 236 3107860384',
'18:18 2020.12.16 29.00 9280440301295284 236 3107860384',
'18:18 16.12.2020 29.00 9280440301295284 236 3107860384',
'18:18 2020.12.16 29.00 9280440301295284 236 3107860384',
]
s1 = '18:18 2020.12.16 29.00 9280440301295284 236 3107860384'
import re
t1 = s1.split(' ')
p = {}
for j in t1:
s = re.findall(u'^(\d+\.\d{1,2})$', j)
fn = re.findall(u'^(\d{16})$', j)
fp = re.findall(u'^(\d{9,10})$', j)
i = re.findall(u'^(\d{1,6})$', j)
time = re.findall(u'^(\d{1,2}\:\d{1,2}\:?\d{1,2})$', j)
date_1 = re.findall(u'^(\d{4}\.\d{1,2}\.\d{1,2})$', j)
date_2 = re.findall(u'^(\d{1,2}\.\d{1,2}\.\d{4})$', j)
if s:
p['s'] = re.findall(u'^(\d+\.\d{1,2})$', j)[0]
elif fn:
p['fn'] = re.findall(u'^(\d{16})$', j)[0]
elif fp:
p['fp'] = re.findall(u'^(\d{9,10})$', j)[0]
elif i:
p['i'] = re.findall(u'^(\d{1,6})$', j)[0]
elif time:
p['time'] = re.findall(u'^(\d{1,2}\:\d{1,2}\:?\d{1,2})$', j)[0]
elif date_1:
p['date'] = re.findall(u'^(\d{4}\.\d{1,2}\.\d{1,2})$', j)[0]
elif date_2:
p['date'] = re.findall(u'^(\d{1,2}\.\d{1,2}\.\d{4})$', j)[0]
                p['date'] = p['date'][6:10] + '.' + p['date'][3:5] + '.' + p['date'][0:2]  # convert dd.mm.yyyy to yyyy.mm.dd
else:
                print 'Alert: could not identify ' + j
#if not p['s']:
#p['s'] = re.findall(u'^(\d+\.\d{1,2})$', i)
#p['s'] = re.findall(u'(\d+\.\d{1,2})', s1)[0]
#p['fn'] = re.findall(u'(\d{16})', s1)[0]
#p['fp'] = re.findall(u'(\d{9,10})', s1)[0]
#p['i'] = re.findall(u'(\d{1,6})', s1)[0]
#p['time'] = re.findall(u'(\d{1,2}\:\d{1,2}\:?\d{1,2})', s1)[0]
#p['date'] = re.findall(u'(\d{4}\.\d{1,2}\.\d{1,2})', s1)[0]
p['t'] = ''.join(p['date'].split('.')) + 'T' + ''.join(p['time'].split(':'))
del p['time']
del p['date']
self.assertEqual({'s': '29.00', 'fn': '9280440301295284', 'fp': '3107860384', 'i': '236', 't':'20201216T1818'}, p)
self.assertEqual({'s': '29.00', 'fn': '9280440301295284', 'fp': '3107860384', 'i': '236', 't':'20201216T1818'}, QRCodeReader.parse_1(s1))
for s in s2:
print s
self.assertEqual({'s': '29.00', 'fn': '9280440301295284', 'fp': '3107860384', 'i': '236', 't':'20201216T1818'}, QRCodeReader.parse_1(s))
def test_11(self):
company_family = Company(title='family')
company_family.save()
tests = [
'20201216T1818 29.00 9280440301295284 236 3107860384',
#'2020.12.16 18:18:01 29.00 9280440301295284 236 3107860384',
'2020.12.16 18:18 29.00 9280440301295284 236 3107860384',
'18:18 2020.12.16 29.00 9280440301295284 236 3107860384',
'18:18 16.12.2020 29.00 9280440301295284 236 3107860384',
'18:18 2020.12.16 29.00 9280440301295284 236 3107860384',
]
chat_id = '383332826'
for test in tests:
message = test
Telegram.process_message(company_family, chat_id, message)
def test_12(self):
sh = ShowcasesCategory(title=u'Кафе и рестораны')
sh.save()
sh = ShowcasesCategory(title=u'Гипермаркет')
sh.save()
company_family = Company(title='family')
company_family.save()
qr_tests = [
't=20200524T125600&s=849.33&fn=9285000100127361&i=115180&fp=1513716805&n=1',
't=20200506T215300&s=1351.88&fn=9285000100127255&i=83300&fp=328049751&n=1',
'/cscat_1_2',
'/cheque_1',
't=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1',
'test t=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1 test',
u'Привет test t=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1 test',
u"""
message.find('i') >= 0 t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sf s выаавы ы ва ывage.find('t') >= 0 and \
message.find('i') >= 0 t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sf s выаавы ы ва ывage.find('t') >= 0 and \
message.find('fp') >= 0 and \
message.find('i') >= 0 t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sf s выаавы ы ва ыв
""",
'print "====", message.en',
'dsf ds fvt=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sdf ds',
# u'spent; 101.01; буреговы; мак авто',
# u'spend; 60; кофе; ван бакс тверская 7; 2020.12.19 18:46',
# u'spend; 60; кофе; ван бакс; 2020.12.15',
'/list_by_month',
'/list_nice',
't=20200515T204500&s=2091.85&fn=9285000100127484&i=89243&fp=1167631633&n=1',
]
chat_id = 'Test chat id = 100500'
chat_id = '383332826'
for qr_test in qr_tests:
message = qr_test
print '====', message.encode('utf8')
Telegram.process_message(company_family, chat_id, message)
```
#### File: consumption/prediction/tests.py
```python
from django.test import TestCase
from models import Element, Prediction, PredictionLinear, PredictionLinearFunction
from product_card.models import ProductCard
from offer.models import Offer
from company.models import Company, Employee
from cheque.models import FNSCheque, FNSChequeElement
from datetime import datetime
class Base(TestCase):
def setUp(self):
self.maxDiff = None
p = ProductCard(title="МОЛОКО ДВД 3.2%")
p.save()
p = ProductCard(title="МОЛОКО SPAR 3.2% 1.7 л.")
p.save()
p = ProductCard(title="МОЛОКО Простоквашино 3.2%")
p.save()
p = ProductCard(title="Творог SPAR 9%")
p.save()
@staticmethod
def list_buy_cheese():
return [
{
'items': [
{"weight": 220, "quantity":1,"sum":13500,"price":13500,"name":u"4607004891694 СЫР СЛИВОЧНЫЙ HOCHLA", "volume": 0.22},
{"weight": 250, "quantity":1,"sum":13990,"price":13990,"name":u"8600742011658 СЫР СЕРБСКАЯ БРЫНЗА", "volume": 0.25},
],
"retailPlaceAddress":u"107076, г.Москва, ул.Богородский Вал, д.6, корп.2",
"dateTime":"2020-05-28T22:51:00",
},
{
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
"dateTime":"2020-05-15T20:45:00",
"items":[
{"weight": None, "quantity":0.278,"name":u"2364939000004 СЫР КОРОЛЕВСКИЙ 51%","price":49990,"sum":13897, "volume": 1},
],
},
{
"dateTime":"2020-05-10T21:08:00",
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
"items":[
{"sum":16797,"price":59990,"quantity":0.28,"name":u"2316971000009 СЫР МААСДАМ 45% ИЧАЛ", "volume": 1},
],
},
{
"items":[
{"quantity":0.248,"sum":12476,"name":u"2372240000002 СЫР ГРАНД SPAR 45%","price":50306, "volume": 1},
{"quantity":0.264,"sum":9945,"name":u"2364178000001 СЫР МААСДАМ 45% ПРЕ","price":37670, "volume": 1},
],
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
"dateTime":"2020-04-21T14:04:00",
},
{
"userInn":"7703270067",
"items":[
{"calculationTypeSign":4,"ndsRate":2,"name":u"СЫР РОССИЙСКИЙ 45%","quantity":0.299,"ndsSum":1356,"calculationSubjectSign":1,"sum":14920,"price":49900, "volume": 1},
{"weight": 250, "calculationTypeSign":4,"ndsRate":2,"name":u"СЫР МАСКАР.80% 250ГР","quantity":2,"ndsSum":3436,"calculationSubjectSign":1,"sum":37800,"price":18900, "volume": 0.25},
{"weight": 90, "calculationTypeSign":4,"ndsRate":2,"name":u"СЫР ПЛ С ЛУКОМ 90Г","quantity":2,"ndsSum":618,"calculationSubjectSign":1,"sum":6798,"price":3399, "volume": 0.09},
{"weight": 90, "calculationTypeSign":4,"ndsRate":2,"name":u"СЫР ПЛ ВОЛНА 45% 90Г","quantity":4,"ndsSum":1236,"calculationSubjectSign":1,"sum":13596,"price":3399, "volume": 0.09},
],
"dateTime":"2020-05-23T21:58:00",
},
{
"dateTime":"2020-05-02T20:56:00",
"userInn":"7703270067",
"items":[
{"weight": 80, "sum":3899,"price":3899,"ndsRate":2,"calculationTypeSign":4,"ndsSum":354,"calculationSubjectSign":1,"name":u"ПЛ.СЫР ЯНТАРЬ ФОЛ80Г","quantity":1, "volume": 0.08},
{"weight": 400, "sum":15900,"price":15900,"ndsRate":2,"calculationTypeSign":4,"ndsSum":1445,"calculationSubjectSign":1,"name":u"СЫР ПЛ ФЕТАКСА 400Г","quantity":1, "volume": 0.4},
{"sum":3399,"price":3399,"ndsRate":2,"calculationTypeSign":4,"ndsSum":309,"calculationSubjectSign":1,"name":u"СЫР ПЛ ВОЛНА 45% 90Г","quantity":1, "volume": 0.09},
{"sum":3399,"price":3399,"ndsRate":2,"calculationTypeSign":4,"ndsSum":309,"calculationSubjectSign":1,"name":u"СЫР ПЛ ВОЛНА 45% 90Г","quantity":1, "volume": 0.09},
{"sum":3899,"price":3899,"ndsRate":2,"calculationTypeSign":4,"ndsSum":354,"calculationSubjectSign":1,"name":u"ПЛ.СЫР ЯНТАРЬ ФОЛ80Г","quantity":1, "volume": 0.08},
],
},
{
"retailPlaceAddress":u"107076, г.Москва, ул.Богородский Вал, д.6, корп.2",
"userInn":"5258056945",
"items":[
{"price":59899,"name":u"2364178000001 СЫР МААСДАМ 45% ПРЕ","quantity":0.356,"sum":21324, "volume": 1},
{"weight": 400, "price":20000,"name":u"4607004892677 СЫР HOCHLAND МЯГКИЙ","quantity":1,"sum":20000, "volume": 0.4}, # возсожно 480 разные днные
],
"dateTime":"2020-06-03T14:50:00",
},
]
def __list_buy_milk(self):
return Base.list_buy_milk()
@staticmethod
def list_buy_milk():
return [
{
"fiscalDocumentNumber":148436,
"taxationType":1,
"nds10":13873,
"shiftNumber":386,
"kktRegId":"0001734115017264",
"operationType":1,
"ecashTotalSum":207870,
"requestNumber":182,
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
"receiptCode":3,
"fiscalSign":2166849586,
"operator":"<NAME>",
"userInn":"5258056945",
"user":"ООО \"<NAME>\"",
"fiscalDriveNumber":"9285000100127782",
"cashTotalSum":0,
"nds18":9212,
"totalSum":207870,
"dateTime":"2020-06-03T14:50:00",
'items': [
{"price":5990,"name":u"4607045982771 МОЛОКО SPAR УЛЬТРАПА","quantity":3,"sum":17970, 'volume': 1},
],
},
{
"dateTime":"2020-05-28T22:51:00",
'items': [
{"weight": 1000, "quantity":2,"sum":11980,"price":6990,"name":u"4607045982771 МОЛОКО SPAR УЛЬТРАПА", 'volume': 1},
],
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
},
{
"dateTime":"2020-05-24T12:56:00",
'items': [
{"weight": 1000, "quantity":1,"name":u"4607045982788 МОЛОКО SPAR УЛЬТРАПА","price":4990,"sum":4990, 'volume': 1},
],
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
},
{
"dateTime":"2020-05-23T21:58:00",
'items': [
{"weight": 1400, "calculationTypeSign":4,"ndsRate":2,"name":u"МОЛОКО ПАСТ.3,7%1400","quantity":1,"ndsSum":726,"calculationSubjectSign":1,"sum":7990,"price":7990, 'volume': 1.4},
],
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
},
{
"dateTime":"2020-05-15T20:45:00",
'items': [
{"weight": 925, "quantity":1,"name":u"4607167840416 МОЛОКО SPAR 3,2% 925","price":5490,"sum":5490, 'volume': 0.925},
],
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
},
{
"dateTime":"2020-05-10T21:08:00",
'items': [
{"weight": 1700, "sum":8990,"price":8990,"quantity":1,"name":u"4607167841154 МОЛОКО SPAR 2,5% 1,7", 'volume': 1.7},
],
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
},
{
"dateTime":"2020-05-06T21:53:00",
'items': [
{"quantity":1,"name":u"4607167841154 МОЛОКО SPAR 2,5% 1,7","price":8990,"sum":8990, 'volume': 1.7},
{"weight": 950,"quantity":1,"name":u"4690228007842 МОЛОКО ДОМИК В ДЕРЕВ","price":5990,"sum":5990, 'volume': 1},
],
"retailPlaceAddress":u"г.Москва, ул.Богородский Вал, д.6, корп.2",
},
#{
# "dateTime":"2020-03-28T19:58:00",
# 'items': [
# {"sum":4990,"price":4990,"name":u"4607167840416 МОЛОКО SPAR 3,2% 925","quantity":1, 'volume': 0.925},
# ],
# "retailPlaceAddress":u"Ашан",
#},
]
def test_linear_forecasting_for_the_whole_period(self):
"""
        Try linear forecasting over the whole period.
        Some resources are consumed constantly but not very regularly.
        Milk, for example.
        It is bought several times a week.
        Consumption is not expected to change.
        Let us try to approximate this function linearly.
        The required sizes were added; in our case milk is consumed in litres, 'volume': 1.
        First get the average milk consumption per day:
        sum up all consumption and divide by the number of days over which it took place;
        the result is effectively the derivative (the consumption rate).
        Next, compute the rate again excluding one consumption element and shortening the period to the moment it was bought.
        Then work out how many days the latest purchase will last if consumption stays the same.
        For now we use a simple model where consumption is counted from the date of the first purchase to the date of the last purchase.
        Adding the number of days the last purchase will last to the delta between the first and the last purchase gives the date when the next purchase should be put on the calendar.
"""
delta_days = 40
delta_days_2 = 35
list_buy_milk = self.__list_buy_milk()
elements = []
for i in list_buy_milk:
for j in i['items']:
elements.append(Element(j['name'], i['dateTime'], j['volume']*j['quantity']))
#self.assertEqual('', elements)
self.assertEqual(0.318, Prediction.average_weight_per_day_in_during_period(elements, delta_days))
pl = PredictionLinear(elements)
self.assertEqual(30, pl.delta_days())
self.assertEqual(27, pl.without_last_delta_days())
self.assertEqual(0.424, pl.average_weight_per_day_in_during_period())
#self.assertEqual(0.3, Prediction.average_weight_per_day_in_during_period(elements[1:], delta_days_2))
delta = Prediction.average_weight_per_day_in_during_period(elements[1:], delta_days_2)
self.assertEqual(0.278, delta)
pl_2 = PredictionLinear(elements[1:])
delta = pl_2.average_weight_per_day_in_during_period()
self.assertEqual(0.324, delta)
#self.assertEqual(10.0, elements[0].get_weight() / delta)
days_continue_for_last_buy = float("{0:.3f}".format(elements[0].get_weight() / delta))
self.assertEqual(9.259, days_continue_for_last_buy)
delta_days_future = pl.days_future()
self.assertEqual(8.333, delta_days_future)
        today_is = 44.259  # the day of the month on which the reminder that everything runs out today should fire
self.assertEqual(today_is, delta_days_2 + days_continue_for_last_buy)
def test_2(self):
"""
        Find the product cards that fit the given consumption function.
        The consumption function consists of:
        1) the resources included in it,
        as well as the products referenced by the included resources
        2) it takes into account all resources processed within this company that are not included in this function
        (after processing/adding to another function, wait a few days, since some may not have been processed yet);
        the simple case: if a resource was added to another function, do not automatically suggest it for this one
        3) resources that were suggested but rejected by the user
        4) products that were suggested but rejected by the user
        5) there are auto functions shared by all users, e.g. МОЛОКО 3.2%; all products and resources with that attribute are added to them automatically for recommendations.
        What is the better approach?
        1) link a specific resource
        then, from the resource, we would have to determine
        1.1) the products it is linked to or that it fits.
        2) link a product card
        then all resources linked to the card would be added automatically - NO
        but it becomes easy to find what to extend the function with, i.e. analogues - YES
        Does the function need an explicit link to product cards?
        +
        1) it simplifies the search for replacement products
        -
        1) the user would have to link every alternative product
"""
# get the list of the function's resources
list_buy_milk = self.__list_buy_milk()
elements = []
for i in list_buy_milk:
for j in i['items']:
elements.append(Element(j['name'], i['dateTime'], j['volume']*j['quantity']))
prediction = PredictionLinear(elements)
product_cards = prediction.product_cards()
#self.assertEqual([ProductCard(), ProductCard(), ProductCard()], product_cards)
self.assertEqual(set([ProductCard.objects.get(id=1), ProductCard.objects.get(id=2), ProductCard.objects.get(id=3)]), product_cards)
def test_3(self):
"""
Show the user all available offers based on the product cards obtained.
"""
print 'test 3'
product_cards = ProductCard.objects.all()
self.assertEqual(4, len(product_cards))
offers = Offer.objects.filter(product_card__in=product_cards)
self.assertEqual([], list(offers))
def test_4(self):
"""
Create offers based on:
1. receipts - entered by a person
find matching products by the receipt line text, price and shop;
select the products whose offer should be updated; since a purchase is a one-off event it does not change by itself, only over time;
many product cards may be found (users can create different cards);
this can be worked around by creating a "super card" that users cannot edit but can link their own cards to,
and keeping these super cards in a separate tree;
update the offer only for the 3 out of 1000 cards that the user selected.
2. resources - entered by a person
find matching products based on the resource;
an offer needs a price and a resource has none, so this does not fit,
but it may be worth introducing a separate "acquisition" entity that records resources and their acquisition costs.
3. a resource based on a receipt
find products based on the resource and the receipt linked to it.
4. imports from web sites - a robot
each robot knows how a product card maps to the showcase element it is connected to, in order to obtain the offer;
for a web site the showcase element can be the product page;
there are also other robots that update card information when it changes on the showcase - a semi-manual mode.
"""
print 'test 4'
#1
#ChequeElement.objects.filter(
list_buy_milk = self.__list_buy_milk()
elements = []
for i in list_buy_milk:
for j in i['items']:
elements.append({'title': j['name'], 'price': j['price'], 'datetime_buy': i['dateTime'], 'showcase': i['retailPlaceAddress']})
#TODO here the product cards for which an offer should be created are picked manually
for e in elements:
for product_card in ProductCard.find_by_text(e['title']):
offer = Offer(product_card=product_card, price=e['price'], datetime=e['datetime_buy'], showcase=e['showcase'])
offer.save()
#for o in Offer.objects.all():
# print o
product_cards = ProductCard.objects.all()
self.assertEqual(4, len(product_cards))
offers = Offer.objects.filter(product_card__in=product_cards)
#TODO the result is probably not correct, since there may be more matching products
self.assertEqual(24, len(offers))
def test_5(self):
"""
Show the user all available offers based on the product cards obtained.
"""
print 'test 5'
product_cards = ProductCard.objects.all()
self.assertEqual(4, len(product_cards))
offers = Offer.objects.filter(product_card__in=product_cards)
self.assertEqual([], list(offers))
def test_6(self):
"""
<NAME> wants to start tracking grocery spending and save money while doing so.
1. To do that he has to enter all of the family's spending into the system.
2. Allow other users to use information about his purchases;
let them compete over who saved more on their grocery basket this week, this month, this year.
3. Buy in the places and in the quantities recommended by the system:
build an optimal grocery basket with a month's supply in advance;
show how much of a product the given function consumes per day on average;
show the last 2 purchase prices in the given category and the average price for the week, month and year,
as well as the cheapest price and place among the shops he bought from in the last month, then the cheapest price in his city, in the country, in the world;
show how much he would save by buying a month's supply there right now.
"""
pety_name = 'Pety'
pety_key = 'key_123'
# self.assertFalse(has_account(pety_name, pety_key))
# create_account(pety_name, pety_key)
# self.assertTrue(has_account(pety_name, pety_key))
#key = get_account_key(pety_name, pety_key)
# pety_account = get_account(pety_name, pety_key)
# self.assertEqual([], list_company_for_member(pety_account))
# create_company(board_title, pety_account)
# board_title = 'Pety Family'
# company = Company.objects.filter(title=board_title)
#self.assertTrue(company.allow_access_by(pety_account))
# self.assertEqual(1, len(list_company_for_member(pety_account)))
cheque_qr_1 = 'qwerty'
cheque_qr_2 = '123456'
fns_json_cheque_info_1 = get_info_by_qr_from_fns(cheque_qr_1)
fns_json_cheque_info_2 = get_info_by_qr_from_fns(cheque_qr_2)
# save_in_company_fns_json_cheque_repository(company, pety_account, fns_json_cheque_info_1)
function_title = 'milk and ...'
# create_function_consumption(company, pety_account, function_title)
milk_function = get_function(function_title) # the code should be hardened so that only functions inside the company can be fetched
for element in consumption_elements(fns_json_cheque_info_1):
milk_function.append(element)
self.assertEqual(0.424, milk_function.average_weight_per_day_in_during_period())
delta_days_future = milk_function.days_future()
self.assertEqual(8.333, delta_days_future)
cheese_function_title = 'cheese'
cheese_function = get_function(cheese_function_title)
for element in consumption_elements(fns_json_cheque_info_2):
cheese_function.append(element)
self.assertEqual(0.097, cheese_function.average_weight_per_day_in_during_period())
self.assertEqual(0.09, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-01T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.088, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-02T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.104, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-03T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.102, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-04T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.099, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-05T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.097, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-06T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.095, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-07T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.093, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-08T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.091, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-09T10:00:00", '%Y-%m-%dT%H:%M:%S')))
delta_days_future = cheese_function.days_future()
self.assertEqual(9.0, delta_days_future)
def test_7(self):
"""
Create an employee.
Create a company or pick one of the existing ones
(only an employee can create one).
Add part of the receipts to the repository,
then fetch them back from the repository.
Create a prediction function,
assign the created function to the relevant receipt positions,
and forecast 4 days ahead.
"""
pety_employee = Employee(title='Pety', key='123zxc')
pety_employee.save()
#company_family = Company(owner=pety_employee, title='family')
company_family = Company(title='family')
company_family.save()
company_family.employees.add(pety_employee)
company_family = None
company_family = Company.objects.filter(employees__in=[pety_employee])[0]
function_title = 'Мilk'
plf_milk = PredictionLinearFunction(title=function_title)
plf_milk.save()
for i in Base.list_buy_milk()[:5]:
#for i in Base.list_buy_milk():
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'])
fns_cheque.save()
for j in i['items']:
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'])
e.save()
plf_milk.cheque_elements.add(e)
plf_cheese = PredictionLinearFunction(title='Cheese')
plf_cheese.save()
for i in Base.list_buy_cheese():
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'])
fns_cheque.save()
for j in i['items']:
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'])
e.save()
plf_cheese.cheque_elements.add(e)
# check the automatic weight detection
good_count = 0
for e in FNSChequeElement.objects.all():
if e.has_weight_from_title() and float(e.get_weight_from_title()) == float(e.volume):
#if float(e.get_weight()) == float(e.volume * e.quantity):
good_count += 1
if not e.get_title() in [u'СЫР МАСКАР.80% 250ГР', u'СЫР ПЛ С ЛУКОМ 90Г', u'СЫР ПЛ ВОЛНА 45% 90Г', u'СЫР ПЛ ФЕТАКСА 400Г', u'СЫР ПЛ ВОЛНА 45% 90Г', u'СЫР ПЛ ВОЛНА 45% 90Г']:
print e.get_title().encode('utf8')
assert False
self.assertEqual(6, good_count)
#fetch
milk_function = PredictionLinear([])
for element in PredictionLinearFunction.objects.filter(title=function_title):
for i in element.elements():
milk_function.append(i)
cheese_function = PredictionLinear([])
for element in PredictionLinearFunction.objects.filter(title='Cheese'):
for i in element.elements():
cheese_function.append(i)
self.assertEqual(0.396, milk_function.average_weight_per_day_in_during_period())
delta_days_future = milk_function.days_future()
self.assertEqual(10.135, delta_days_future)
self.assertEqual(0.097, cheese_function.average_weight_per_day_in_during_period())
self.assertEqual(0.088, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-02T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.104, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-03T10:00:00", '%Y-%m-%dT%H:%M:%S')))
self.assertEqual(0.102, cheese_function.average_weight_per_day_from_first_buy_to_this_date(datetime.strptime("2020-06-04T10:00:00", '%Y-%m-%dT%H:%M:%S')))
delta_days_future = cheese_function.days_future()
self.assertEqual(9.0, delta_days_future)
##
for i in Base.list_buy_milk()[5:]:
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'])
fns_cheque.save()
for j in i['items']:
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'])
e.save()
plf_milk.cheque_elements.add(e)
#fetch
milk_function = PredictionLinear([])
for element in PredictionLinearFunction.objects.filter(title=function_title):
for i in element.elements():
milk_function.append(i)
self.assertEqual(0.424, milk_function.average_weight_per_day_in_during_period())
delta_days_future = milk_function.days_future()
self.assertEqual(8.333, delta_days_future)
def test_8(self):
#TODO did not specify that when fetching functions we may only take functions belonging to the company we are calculating for.
# The function repository class should expose an interface where:
# 1. the company for which the function is fetched is always specified, or
# 2. functions can only be obtained through the company, with the company acting as the aggregate of the receipt functions.
# The aggregate is preferable because its boundaries are explicit,
# whereas with a repository it is unclear why a method that fetches a function without a company cannot be added,
# and that would have to be spelled out in comments in the repository description.
# For MVP speed we do it the simple way.
"""
Generate functions based on the receipts available in the given company.
Not every customer drinks milk, so not everyone needs a milk function;
create it only when the user asks for it.
"""
print 'test 8'
# Take all the receipts from which we want to work out which functions we need.
pety_employee = Employee(title='Pety', key='123zxc')
pety_employee.save()
company_family = Company(title='family')
company_family.save()
company_family.employees.add(pety_employee)
# add the receipts to the company
for i in Base.list_buy_milk():
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'])
fns_cheque.save()
for j in i['items']:
print j['name'].encode('utf8')
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'])
e.save()
for i in Base.list_buy_cheese():
fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'])
fns_cheque.save()
for j in i['items']:
print j['name'].encode('utf8')
e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'])
e.save()
print 'Base:'
print FNSCheque.objects.all().count()
print FNSChequeElement.objects.all().count()
print '---'
#function = PredictionLinearFunction.objects.get(base_type=u'МОЛОКО')
#print function
# For each receipt element try to determine which of the predefined functions it fits:
# - by the element's content - the name contains МОЛОКО 3.2%;
# - by the linked product card - the product card title contains МОЛОКО 3.2%;
# - or, if a receipt contains an unidentified item X together with СМЕТАНА and ТВОРОГ,
#   and there are many other receipts with the three items СМЕТАНА, ТВОРОГ, "МОЛОКО", decide that X is МОЛОКО and add it to the function.
fns_cheques = FNSCheque.objects.filter(company=company_family)
## get all possible base function types
#base_function_types = set()
#for fns_cheque in fns_cheques:
# #for fns_cheque_element in fns_cheque.elements():
# for fns_cheque_element in FNSChequeElement.objects.filter(fns_cheque=fns_cheque):
# for base_function_type in PredictionLinear.base_function_types(fns_cheque_element):
# base_function_types.add(base_function_type)
#for base_function_type in base_function_types:
# print base_function_type.encode('utf8')
#print
base_function_types = PredictionLinear.auto_find_available_base_function_type(fns_cheques)
## create the missing base functions
#for base_function_type in base_function_types:
# if not PredictionLinearFunction.objects.filter(base_type=base_function_type).count():
# f = PredictionLinearFunction(base_type=base_function_type)
# f.save()
PredictionLinear.save_functions_with_base_function_types(base_function_types)
## fetch the base functions
#functions = PredictionLinearFunction.objects.filter(base_type__in=base_function_types)
#for f in functions:
# print f
## fill the functions with the receipt elements that fit them
#print len(fns_cheques)
#for fns_cheque in fns_cheques:
# #for fns_cheque_element in fns_cheque.elements():
# for fns_cheque_element in FNSChequeElement.objects.filter(fns_cheque=fns_cheque):
# print '--->'
# print fns_cheque_element.get_title().encode('utf8')
# for base_function_type in PredictionLinear.base_function_types(fns_cheque_element):
# function = PredictionLinearFunction.objects.get(base_type=base_function_type)
# function.cheque_elements.add(fns_cheque_element)
PredictionLinear.auto_add_cheque_elements_to_functions(fns_cheques)
function = PredictionLinearFunction.objects.get(base_type=u'МОЛОКО')
print function
print '----'
for f in PredictionLinearFunction.objects.all():
print f
elements = set()
## find receipt elements whose names match the linked receipt elements exactly, and compute the per-kilogram offer from them
#elements = set()
#for e in function.cheque_elements.all():
# for en in FNSChequeElement.objects.filter(name=e.name):
# elements.add(en)
elements.update(function.cheque_have_name_like_name_from_contains_cheques())
## take the function's key words that may occur in receipt lines
#for word in function.allow_key_words():
# qs = FNSChequeElement.objects.filter(name__icontains=word)
# for e_word in function.disallow_key_words():
# qs = qs.exclude(name__icontains=e_word)
# for e in qs:
# elements.add(e)
elements.update(function.cheque_contains_key_words())
for e in elements:
print e.offer_element_params()
#offers = []
#for e in elements:
# offers.append(e.offer_element_params())
#for o in offers:
# print o
print 'end'
def has_account(i, j):
return False
def create_account(i, j):
pass
def get_info_by_qr_from_fns(cheque_qr_1):
if cheque_qr_1 == 'qwerty':
return [1]
return []
def get_function(function_title):
return PredictionLinear([])
def consumption_elements(fns_json_cheque_info):
if fns_json_cheque_info:
list_buy = Base.list_buy_milk()
else:
list_buy = Base.list_buy_cheese()
elements = []
for i in list_buy:
for j in i['items']:
elements.append(Element(j['name'], i['dateTime'], j['volume']*j['quantity']))
return elements
```
#### File: consumption/product_card/models.py
```python
from __future__ import unicode_literals
from django.db import models
#from datetime import date
#from datetime import datetime
#from datetime import timedelta
#
#from django.conf import settings
#
#
#from account.models import Account
#
#import json
#
#import urllib
##import urllib2
#import urllib2, base64
#import pytz
#import re
#
#import time
class CatigoryTree(models.Model):
# 11 product groups: bread products (bread and pasta in flour equivalent, flour, cereals, legumes); potatoes; vegetables and gourds (cabbage, cucumbers, tomatoes, carrots, beets, onions); fresh fruit (apples, oranges, grapes, bananas); sugar and confectionery; meat products (beef, lamb, pork, poultry); fish products (fresh fish, salted herring); milk and dairy products (milk, sour cream, butter, cheese, cottage cheese, fermented dairy); eggs; vegetable oil, margarine and other fats; other products (salt, tea and spices).
parent = models.ForeignKey('self', blank=True, null=True)
title = models.CharField(blank=True, max_length=254)
@staticmethod
def __create_base_catigory():
"""For the initial seeding of the base categories."""
CatigoryTree.objects.create(id=1, title=u'хлебные продукты (хлеб и макаронные изделия в пересчете на муку, мука, крупы, бобовые, вермишель, батон, крупа, лапша, кускус, киноа, макарон)')
CatigoryTree.objects.create(id=2, title=u'картофель')
CatigoryTree.objects.create(id=3, title=u'овощи и бахчевые (капуста, огурцы, помидоры, морковь, свекла, лук, чеснок, томат, редис, авокадо, перец, укроп, сельдерей)')
CatigoryTree.objects.create(id=4, title=u'вежие фрукты (яблоки, апельсины, виноград, банан, клубника, лимон, киви, чернослив, мандарин, помело)')
CatigoryTree.objects.create(id=5, title=u'сахар и кондитерские изделияяя (шоколад конфеты зефир, печенье)') # 'изделияяя' is misspelled on purpose so it does not collide with 'макаронные изделия' (pasta)
CatigoryTree.objects.create(id=6, title=u'мясопродукты (мясо говядины, баранины, свинины, птицы, сосиски, колбаса, пельмени)')
CatigoryTree.objects.create(id=7, title=u'рыбопродукты (рыба свежая, сельдь соленая, тунец, икра, лосос, форел, креветк)')
CatigoryTree.objects.create(id=8, title=u'молоко и молокопродукты(молоко, сметана, масло сливочное, сыр, творог, кисломолочные продукты, кефир, йогурт, сливки, сырок)')
CatigoryTree.objects.create(id=9, title=u'яйца (яйцо)')
CatigoryTree.objects.create(id=10, title=u'растительное масло, маргарин и другие жиры, майонез, соус')
CatigoryTree.objects.create(id=11, title=u'прочие продукты (соль, чай и специи, мед, орех, чипсы)')
@staticmethod
def __create_sub_catigory():
"""For the initial seeding of the sub-categories."""
# create the groups and sub-groups
for c in CatigoryTree.objects.all():
for word in c.title.split():
word = word.replace(',','').replace('.','').replace('(','').replace(')','')
word = word.upper()
if len(word) > 2:
CatigoryTree.objects.create(title=word, parent=c)
@staticmethod
def init_base():
"""Первоночальная инициализация"""
__create_base_catigory()
__create_sub_catigory()
def __unicode__(self):
return u"%s -> %s" % (self.title, self.parent)
class ProductCard(models.Model):
"""
Enriched product information.
It exists so that the user does not have to enter photos of the purchase, its composition and its characteristics together with the receipt,
but simply picks the enriched information that looks right to him.
That way there is no need to know the exact production year or batch - the user decides himself;
we can only help with an automatic recommendation.
Once a product entry is created it must not be changed, because the places where it is used already rely on it (the iPhone colours example),
so new product information always has to be created: newer products are offered the newer entry, while older ones are offered to update their main link
or are allowed to create new ones.
The barcode can be photographed and attached to the receipt position for faster product identification.
"""
parent = models.ForeignKey('self', blank=True, null=True)
datetime_create = models.DateTimeField(blank=True, auto_now_add = True) # since cards are immutable, the creation date can loosely mark the start of their consumption
title = models.CharField(blank=True, max_length=254)
barcode = models.CharField(blank=True, max_length=254) # barcode
weight = models.SmallIntegerField(blank=True, null=True) # weight of the contents
is_packed = models.NullBooleanField() # the item is not sold by weight (pack, block, can - already packaged and indivisible: you cannot sell half a can of peas)
#composition = models.TextField(blank=True, ) # ingredients
#image_main_v1 = models.CharField(blank=True, max_length=254)
#instruction = models.TextField(blank=True, ) # instructions
catigory_tree = models.ForeignKey(CatigoryTree, blank=True, null=True)
catigory_sub_tree = models.ForeignKey(CatigoryTree, blank=True, null=True, related_name="product_info_2_catigory_sub_tree")
@classmethod
def find_by_text(cls, text):
title_parts = text.split(' ')
product_cards = set()
for part in title_parts:
if part in ['SPAR']:
continue
for product_card in ProductCard.objects.filter(title__contains=part):
product_cards.add(product_card)
return product_cards
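# Illustrative usage (hypothetical receipt line, not from the original code):
#   ProductCard.find_by_text(u'МОЛОКО SPAR 3,2% 925')
# splits the text on spaces, skips the stop word 'SPAR' and returns the set of all
# cards whose titles contain any of the remaining tokens (substring match).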
@classmethod
def update_catigory_and_sub_catigory_for_all_products(cls):
# link the sub-groups to the products
for p in ProductCard.objects.all():
for c in CatigoryTree.objects.all():
for word in c.title.split():
word = word.replace(',','').replace('.','').replace('(','').replace(')','')
word = word.upper()
if len(word) > 2:
if p.title.find(word) != -1:
if c.parent:
p.catigory_sub_tree = c
else:
p.catigory_tree = c
p.save()
def __str__(self):
return self.title.encode('utf8')
``` |
{
"source": "22OuNiJiang/myProject_001",
"score": 3
} |
#### File: 22OuNiJiang/myProject_001/yahaha.py
```python
import requests # fetch the page's HTML source
import csv,random,time # write data to a csv file; random numbers; time
import socket # exception handling
import http.client
from bs4 import BeautifulSoup # pull content out of the relevant tags instead of using regexes
def get_content(url,data=None): # url is the page address
header = { # header is a parameter of requests.get
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding':'gzip,deflate',
'Accept-Language':'zh-CN,zh;q=0.9',
'Connection':'keep-alive',
'User-Agent':'Chrome/92.0.4515.131'
}
timeout = random.choice(range(80,100))
while True:
try:
rep = requests.get(url,headers=header,timeout=timeout) # fetch the page; errors handled below
rep.encoding = 'utf-8'
break
except socket.timeout as e: # timeout handling
print('error 1:',e)
time.sleep(random.choice(range(8,15)))
except socket.error as e: # network connection error
print('error 2:',e)
time.sleep(random.choice(range(20,60)))
except http.client.BadStatusLine as e: # bad server response line
print('error 3:',e)
time.sleep(random.choice(range(30,80)))
except http.client.IncompleteRead as e: # incomplete read
print('error 4:',e)
time.sleep(random.choice(range(5,15)))
return rep.text
def get_data(url): # parse weather and temperature
final = []
bs = BeautifulSoup(url,'html.parser') # create a BeautifulSoup object
body = bs.body # get the body section
data = body.find('div',{'id':'7d'})
ul = data.find('ul')
li = ul.find_all('li')
for day in li: # iterate over the li elements
temp = []
date = day.find('h1').string # find the date
temp.append(date)
inf = day.find_all('p')
temp.append(inf[0].string)
if inf[1].find('span') is None: # the daily high is in the span tag
temperature_highest = None
else:
temperature_highest = inf[1].find('span').string
temperature_lowest = inf[1].find('i').string # the daily low is in the i tag
temp.append(temperature_highest + '℃' if temperature_highest is not None else temperature_highest) # guard against a missing high temperature
temp.append(temperature_lowest)
final.append(temp)
return final
def write_data(data,name): # write the rows to a csv file
file_name = name
with open(file_name,'a',errors ='Ignore',newline='') as i:
i_csv = csv.writer(i)
i_csv.writerows(data)
if __name__ == '__main__': # main entry point
url = 'http://www.weather.com.cn/weather/101010300.shtml'
html = get_content(url)
result = get_data(html)
write_data(result,'weather.csv')
``` |
{
"source": "22pilarskil/7405K_20-21",
"score": 3
} |
#### File: src/testing/kalman.py
```python
import numpy as np
import matplotlib.pyplot as plt
import time
import random
class filter():
def __init__(self, mean0, var0, meanSensor, varSensor, meanMove, varMove, positions, distances):
self.mean0 = mean0
self.var0 = var0
self.meanSensor = meanSensor
self.varSensor = varSensor
self.positions = positions
self.distances = distances
self.meanMove = meanMove
self.varMove = varMove
def get_prediction(self):
new_var, new_mean = self.predict(self.var0, self.mean0, self.varMove, self.meanMove)
var, mean = self.correct(new_var, new_mean, self.varSensor, self.meanSensor)
vals=[]
for m in range(len(self.positions)):
var, mean = self.predict(var, mean, self.varMove, self.distances[m])
var, mean = self.correct(var, mean, self.varSensor, self.positions[m])
vals.append([var, mean])
return vals[-1]
def predict(self, var, mean, varMove, meanMove):
new_var = var + varMove
new_mean= mean+ meanMove
return new_var, new_mean
def correct(self, var, mean, varSensor, meanSensor):
new_mean=(varSensor*mean + var*meanSensor) / (var+varSensor)
new_var = 1/(1/var +1/varSensor)
return new_var, new_mean
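# Note added for clarity (not part of the original source): predict/correct above are the
# standard one-dimensional Kalman filter equations,
#   predict:  mean' = mean + meanMove,                      var' = var + varMove
#   correct:  mean'' = (varSensor*mean' + var'*meanSensor) / (var' + varSensor)
#             var''  = 1 / (1/var' + 1/varSensor)
# i.e. the correction is a precision-weighted average of the predicted state and the measurement.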
positions = [0]
distances = [0]
predictions = []
variances = []
fixes_x = []
fixes_y = []
for numb in range(0, 130):
position = random.random()*10
positions.append(position)
distance = position-distances[-1]
distances.append(distance)
meanSensor = sum(positions)/len(positions)
varSensor = sum((i - meanSensor) ** 2 for i in positions) / len(positions)
meanMove = sum(distances)/len(distances)
varMove = sum((i - meanMove) ** 2 for i in distances) / len(distances)
ft = filter(0, 0, meanSensor, varSensor, meanMove, varMove, positions, distances)
pred=ft.get_prediction()
predictions.append(pred[1])
variances.append(pred[0])
if pred[0] > 8.1:
fixes_x.append(pred[0])
fixes_y.append(pred[1])
```
#### File: src/testing/other.py
```python
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Create figure for plotting
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = []
ys = []
# Initialize communication with TMP102
# This function is called periodically from FuncAnimation
def animate(i, xs, ys):
# Read temperature (Celsius) from TMP102
temp_c = 5
# Add x and y to lists
xs.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
ys.append(temp_c)
# Limit x and y lists to 20 items
xs = xs[-20:]
ys = ys[-20:]
# Draw x and y lists
ax.clear()
ax.plot(xs, ys)
# Format plot
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('TMP102 Temperature over Time')
plt.ylabel('Temperature (deg C)')
# Set up plot to call animate() function periodically
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=1000)
plt.show()
print("done")
``` |
{
"source": "22PoojaGaur/A-Revenue-based-Product-Placement-Framework-to-Improve-Diversity-in-Retail-Businesses",
"score": 3
} |
#### File: A-Revenue-based-Product-Placement-Framework-to-Improve-Diversity-in-Retail-Businesses/src/DPRIP.py
```python
import globals
import random
import copy
TOTAL_SLOTS = 0
TOTAL_REVENUE = 0
DRANK_MEAN = 0
TOTAL_PLACED = 0
IDX = 0
TOP_REVENUE = {}
def calculate_diversification(itemsets):
''' Calculates diversification of itemsets using Parul's approach'''
assert type(itemsets) == list
unique_items = set()
for itemset in itemsets:
for item in itemset:
unique_items.add(item)
return len(unique_items) / len(itemsets)
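# Illustrative usage (made-up itemsets): calculate_diversification([('a', 'b'), ('b', 'c')])
# sees 3 distinct items across 2 itemsets and returns 3 / 2 = 1.5.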
# Relies on the fact that Python passes objects by reference:
# although this function returns nothing, it mutates the
# slots, top_kui_per_slot_rev and CAS arguments in place.
def insert_one_itemset(slots, top_kui_per_slot_rev, top_kui_ptrs, kui_idx, CAS, stype, method=None):
global TOTAL_SLOTS, TOTAL_REVENUE, DRANK_MEAN, TOTAL_PLACED
found = False
while not found:
top = [None,0]
for idx, val in enumerate(top_kui_per_slot_rev):
# ignoring single itemsets
if idx == 0 or idx == 1:
continue
if val > top[1]:
top = [idx, val]
if top[0] is None:
print ('NOT ENOUGH ITEMSETS')
return
top_kui_node = kui_idx[top[0]][top_kui_ptrs[top[0]]]
all_slots = list()
for slot_type in range (0, globals.NUM_TYPE_SLOTS):
all_slots.extend(slots[slot_type])
placed = [node[0] for node in all_slots]
if top_kui_node[0] not in placed:
node = kui_idx[top[0]][top_kui_ptrs[top[0]]]
price = node[2]
if method == 'DR':
price = node[2]
slots[stype].append((
node[0],
price,
node[-1]))
DRANK_MEAN += kui_idx[top[0]][top_kui_ptrs[top[0]]][-1]
TOTAL_PLACED += 1
factor = 1
TOTAL_REVENUE += (kui_idx[top[0]][top_kui_ptrs[top[0]]][-2] * 1.0) * factor
CAS[stype] -= len(kui_idx[top[0]][top_kui_ptrs[top[0]]][0])
found = True
else:
pass
top_kui_ptrs[top[0]] += 1
# if all itemsets of certain sized already placed
if top_kui_ptrs[top[0]] >= len(kui_idx[top[0]]):
top_kui_per_slot_rev[top[0]] = -1
else:
if method == 'D':
top_kui_per_slot_rev[top[0]] = kui_idx[top[0]][top_kui_ptrs[top[0]]][-1]/float(top[0])
else:
top_kui_per_slot_rev[top[0]] = kui_idx[top[0]][top_kui_ptrs[top[0]]][-2]/float(top[0])
if found:
break
return
# This placement scheme will place itemsets based on the per_slot_revenue
# contribution
def _DPRIP(deta_dict, kui_idx, dranks, slot_sizes, method):
global TOTAL_SLOTS, TOTAL_REVENUE, DRANK_MEAN, TOTAL_PLACED
slots = []
slot_types = len(slot_sizes)
for i in range(0, slot_types):
TOTAL_SLOTS += len(slot_sizes[i])
slots.append([])
CAS = []
for stype in range(0, slot_types):
CAS.append(len(slot_sizes[stype]))
top_kui_ptrs = [0]*(len(kui_idx.keys())+1)
top_kui_per_slot_rev = [0]*(len(kui_idx.keys())+1)
for i in kui_idx.keys():
try:
if method == 'D':
top_kui_per_slot_rev[i] = (kui_idx[i][top_kui_ptrs[i]][-1])/float(i)
else:
top_kui_per_slot_rev[i] = (kui_idx[i][top_kui_ptrs[i]][-2])/float(i)
except:
top_kui_per_slot_rev[i] = -1
for stype in range(0, slot_types):
while CAS[stype] > 0:
insert_one_itemset(
slots, top_kui_per_slot_rev,
top_kui_ptrs, kui_idx, CAS, stype, method)
print ('TOTAL REVENUE TRAINING ')
print (TOTAL_REVENUE)
print ('DRANK MEAN')
print (DRANK_MEAN/float(TOTAL_PLACED))
# print ('PATTERNS PLACED (TRAIN)')
# print (TOTAL_PATTERNS)
print ('SLOTS TOTAL')
print (TOTAL_SLOTS)
print ("TOTAL REVENUE")
import pprint
pprint.pprint(TOP_REVENUE, width=1)
return (slots, TOTAL_SLOTS, TOTAL_REVENUE,
DRANK_MEAN/float(TOTAL_PLACED))
def _RHDPRIP(data_dict, kui_R, kui_H, dranks, slot_sizes):
global TOTAL_SLOTS, TOTAL_REVENUE, DRANK_MEAN, TOTAL_PLACED
slots = [] # final slots 2d list
CAS = [] # currently available slots
slot_types = len(slot_sizes)
for stype in range(0, slot_types):
TOTAL_SLOTS += len(slot_sizes[stype])
slots.append([])
CAS.append(len(slot_sizes[stype]))
# store pointers to next candidate of each itemset length
top_kuir_ptrs = [0] * (len(kui_R.keys()) + 1)
top_kuir_per_slot_revenue = [0] * (len(kui_R.keys()) + 1)
for i in kui_R.keys():
try:
top_kuir_per_slot_revenue[i] = kui_R[i][0][-2]
except:
top_kuir_per_slot_revenue[i] = -1
top_kuih_ptrs = [0] * (len(kui_H.keys()) + 1)
top_kuih_per_slot_revenue = [0] * (len(kui_H.keys()) + 1)
for i in kui_H.keys():
try:
top_kuih_per_slot_revenue[i] = kui_H[i][0][-2]
except:
top_kuih_per_slot_revenue[i] = -1
pick_r_or_h = list(['R']*int(globals.R_RATIO * 10))
pick_r_or_h.extend(['H']*int(globals.H_RATIO * 10))
random.shuffle(pick_r_or_h)
kui_to_pick_ptr = 0
for stype in range(0, slot_types):
while CAS[stype] > 0:
if pick_r_or_h[kui_to_pick_ptr] == 'R':
insert_one_itemset(
slots, top_kuir_per_slot_revenue,
top_kuir_ptrs, kui_R, CAS, stype)
elif pick_r_or_h[kui_to_pick_ptr] == 'H':
insert_one_itemset(
slots, top_kuih_per_slot_revenue,
top_kuih_ptrs, kui_H, CAS, stype)
kui_to_pick_ptr = (kui_to_pick_ptr + 1)%10
print ('TOTAL REVENUE TRAINING ')
print (TOTAL_REVENUE)
print ('DRANK MEAN')
print (DRANK_MEAN/float(TOTAL_PLACED))
print ('SLOTS TOTAL')
print (TOTAL_SLOTS)
return (slots, TOTAL_SLOTS, TOTAL_REVENUE,
DRANK_MEAN/float(TOTAL_PLACED))
def _DIVERSIFICATION(data_dict, kui_idx, slot_sizes, method):
TOTAL_SLOTS = 0
TOTAL_REVENUE = 0
slots = []
#kUI = copy.deepcopy(kui_idx)
kUI = {}
A = 0.2
# NRl - a*NRl
NRl = 0
for i in kui_idx.keys():
if i not in kUI:
kUI[i] = []
for j in kui_idx[i]:
# appending NR at the end, has to remove it later
kUI[i].append((j[0], j[2], j[-1], j[-2]))
for slot_size in slot_sizes:
lbd = len(slot_size)
selected_list = []
max_div = -1
NRl = 0
# fill 30% by 4 itemsets, 30% by 3 itemsets, 40% by 2 itemsets
num_4 = int(((lbd * 30) / 100) / 4) # because each itemset would take 4 space
num_3 = int(((lbd * 30) / 100) / 3) # because each itemset would take 3 space
num_2 = int(((lbd * 40) / 100) / 2) # because each itemset would take 2 space
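# Worked example of the 30/30/40 split above (illustrative numbers): with lbd = 40 slots,
# num_4 = int(12 / 4) = 3, num_3 = int(12 / 3) = 4, num_2 = int(16 / 2) = 8,
# which fills 3*4 + 4*3 + 8*2 = 40 slots exactly.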
for i in range (0, num_2):
NRl += kui_idx[2][i][-2]
for i in range (0, num_3):
NRl += kui_idx[3][i][-2]
for i in range (0, num_4):
NRl += kui_idx[4][i][-2]
for i in range(0, 100):
# suffle and get diversification and keep with max diversification
# lbd = 2*num_2 + 3*num_3 + 4*num_4
short_list = list()
short_list.extend(random.choices(kUI[2][:2*lbd], k=num_2))
short_list.extend(random.choices(kUI[3][:2*lbd], k=num_3))
short_list.extend(random.choices(kUI[4][:2*lbd], k=num_4))
#print (short_list)
current_div = calculate_diversification(short_list)
sum_revenue = 0
for idx, i in enumerate(short_list):
sum_revenue += i[-1]
short_list[idx] = short_list[idx][:-1]
# DIV METHOD
if current_div > max_div and method == 'DIV':
max_div = current_div
selected_list = short_list
# HRD METHOD
if current_div > max_div and sum_revenue >= NRl - A*NRl and method == 'HRD':
max_div = current_div
selected_list = short_list
slots.append(selected_list)
print ('selected list ')
print (selected_list)
# remove the items that have been placed
for item in selected_list:
print ( '\n\n\n\ PRINT \n\n')
print ( item)
try:
kUI[len(item[0])].remove(item)
except:
pass
TOTAL_SLOTS += len(item[0])
TOTAL_REVENUE += item[1]
print ('Number of Shelves ' + str(len(slots)))
print (str(len(slots[0])))
print (str(len(slots[1])))
print (str(len(slots[2])))
return (slots, TOTAL_SLOTS, TOTAL_REVENUE, 0)
def DPRIP(data_dict, kui_idx, dranks, slot_sizes, method='R'):
if method == 'RH' or method == 'RDR':
return _RHDPRIP(data_dict, kui_idx['R'], kui_idx['DR'], dranks, slot_sizes)
elif method == 'DIV' or method == 'HRD':
return _DIVERSIFICATION(data_dict, kui_idx, slot_sizes, method)
else:
return _DPRIP(data_dict, kui_idx, dranks, slot_sizes, method)
```
#### File: A-Revenue-based-Product-Placement-Framework-to-Improve-Diversity-in-Retail-Businesses/src/main.py
```python
import argparse
import mock_data
import itertools
from data_parser import parse_data
from data_parser import parse_ch_dict
from calculate_drank import get_dranks
from kui_index import get_kui_index
from slots import get_slots
from DPRIP import DPRIP
from evaluate import evaluate_new
import globals
import _pickle as pkl
import time
import os
from os import path
from random import random
# Adding argument parser.
parser = argparse.ArgumentParser()
parser.add_argument('--data', '-d', help='add path to data file')
parser.add_argument(
'--with_ch_path', '-ch_path', help='run the experiment with diversity', default=None)
args = parser.parse_args()
# Global variables.
DATA_FNAME = args.data
CH_FNAME = args.with_ch_path
def get_data():
assert CH_FNAME is not None
assert DATA_FNAME is not None
if path.isfile('dataset.pkl') and path.isfile('test.pkl'):
train = pkl.load(open('dataset.pkl', 'rb'))
test = pkl.load(open('test.pkl', 'rb'))
else:
(train, test) = parse_data(DATA_FNAME)
pkl.dump(train, open('dataset.pkl', 'wb'))
pkl.dump(test, open('test.pkl', 'wb'))
ch_dict = parse_ch_dict(CH_FNAME)
dranks = get_dranks(train.keys(), ch_dict)
return (train, test, ch_dict, dranks)
if __name__ == '__main__':
# Get globals in variables
num_slots = globals.NUM_SLOTS
type_slots = globals.NUM_TYPE_SLOTS
method = globals.METHOD
# processing data
data_dict = {}
ch_dict = {}
dranks = {}
(train_data, test_transaction, ch_dict, dranks) = get_data()
if method == 'RH':
kui_R = get_kui_index(train_data, dranks=dranks, method='R')
kui_H = get_kui_index(train_data, dranks=dranks, method='H')
kui_idx = {
'R': kui_R,
'H': kui_H
}
elif method == 'RDR':
kui_R = get_kui_index(train_data, dranks=dranks, method='R')
kui_DR = get_kui_index(train_data, dranks=dranks, method='DR')
kui_idx = {
'R': kui_R,
'DR': kui_DR
}
else:
kui_idx = get_kui_index(train_data, dranks=dranks, method=method)
start = time.time()
# get empty slots
slots = get_slots(num_slots, type_slots, 0)
(slots, num_slots, tr_train, dr_train) = DPRIP(
train_data, kui_idx, dranks, slots, method)
end = time.time()
time = end - start
for level in range(0, len(slots)):
#print ("slot level %d", level)
l = 0
for node in slots[level]:
l += len(node[0])
#print (l)
(tr_test, dr_test, place_test, num_total_test, diversification) = evaluate_new(slots, test_transaction)
output = open('prip_slots_%d.pkl' % type_slots, 'wb')
pkl.dump(slots, output)
output.close()
pkl_prefix = 'results/' + method + '_'
metrics = {
'num_slots': num_slots,
'total_revenue_train': tr_train,
'drank_mean_train': dr_train,
'num_placed_test': place_test,
'num_total_test': num_total_test,
'total_revenue_test': tr_test,
'drank_mean_test': dr_test,
'train_time': time,
'diversification': diversification
}
for metric in metrics.keys():
fname = pkl_prefix + metric + '.pkl'
if path.isfile(fname):
data = pkl.load(open(fname, 'rb'))
os.remove(fname)
else:
data = []
data.append(metrics[metric])
pkl.dump(data, open(fname, 'wb'))
```
#### File: A-Revenue-based-Product-Placement-Framework-to-Improve-Diversity-in-Retail-Businesses/src/slots.py
```python
import random
# NOTE: not using zipf right now.
def get_slots(num_slots, type_slots, zipf):
'''
Args -
num_slots: Dictionary containing number of slots
for each slot type
type_slots: number of type of slots
zipf: zipf factor [NOT USED YET]
'''
slots = []
for i in range(0, type_slots):
slots.append([])
for _ in range(0, num_slots[i]):
slots[i].append(0)
return slots
``` |
{
"source": "22PoojaGaur/COMINE-Plusplus-Algorithm-for-Finding-Correlated-Patterns-from-Transactions",
"score": 3
} |
#### File: COMINE-Plusplus-Algorithm-for-Finding-Correlated-Patterns-from-Transactions/src/comine_k.py
```python
from globals import MIN_SUPPORT, MIN_ALL_CONF
import pprint
from tree import Tree
import logging
def get_support_max_limit(item, counts):
"""
Returns the maximum support allowed for this item.
"""
return int(max(counts[item] / MIN_ALL_CONF, MIN_SUPPORT))
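# Illustrative check (hypothetical counts): with counts[item] = 40, MIN_ALL_CONF = 0.5 and
# MIN_SUPPORT = 50, the limit is int(max(40 / 0.5, 50)) = 80; an item whose support exceeds
# this bound cannot form a pattern with `item` that satisfies the all-confidence threshold.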
def find_max_item_support(pattern, supports):
"""
Returns support of item with maximum support among items in pattern.
pattern: List. list of items in pattern.
supports: Dict. item -> count dict
"""
max_support = -1
for item in pattern:
max_support = max(max_support, supports[item])
return max_support
def find_max_support_with_nc(pattern, sup_counts):
max_support = -1
for item in pattern:
if sup_counts[item] > max_support:
max_support = sup_counts[item]
return max_support
def make_pattern(p1, p2):
p1 = p1 if type(p1) == list else [p1]
p2 = p2 if type(p2) == list else [p2]
pattern = list(set(p1).union(set(p2)))
print ("CONVERT " + ' '.join(pattern))
return pattern
def find_support(pattern, supports):
"""
This method considers support of a pattern as the minimum support among its items
patterns: List. list of items in pattern.
supports: Dict
"""
min_support = None
for item in pattern:
if min_support is None:
min_support = supports[item]
else:
if supports[item] < min_support:
min_support = supports[item]
return min_support
def find_support_new(pattern, tree):
# NOTE: unfinished draft helper - it is never called from this module.
min_support = None
if len(pattern) == 1:
# the support count is simply sum of all counts in header
sup = 0
for node in tree.header[pattern[0]]:
sup += node.node_count
else:
idx = 0
first = pattern[idx]
current_node = tree.header[first][0]
sup = current_node.node_count
while idx < len(pattern)+1:
next_node = pattern[idx+1]
sup = min(sup)
pass
def find_support_with_nc(pattern, sup_counts):
min_support = None
for item in pattern:
if min_support is None:
min_support = sup_counts[item]
else:
if sup_counts[item] < min_support:
min_support = sup_counts[item]
return min_support
def find_all_conf(pattern):
pass
def get_prefixes(snodes):
"""
snodes: List. List of suffix nodes for a header
"""
all_prefixes = []
for node in snodes:
prefix = []
count = node.node_count
while node.parent is not None:
prefix.append(tuple([node.item, count]))
node = node.parent
all_prefixes.append(prefix)
return all_prefixes
def cominek(T, suffix=None, k=1, data=None, support_suffix=0):
"""
T: Object. Object of class Tree.
alpha: List. alpha for alpha projected database.
k: Int. Number of call made to this function. This is added because
function acts differently when called 1st time vs when called
subsequent times.
"""
if suffix is not None:
logging.debug('Running cominek with suffix %s and k is %s ', ' '.join(suffix), str(k))
else:
logging.debug('Running cominek with empty suffix')
logging.debug('Header for the tree passed is ')
logging.debug(T.get_header_sorted())
for ai, nc in T.get_header_sorted():
logging.debug('Process for header item %s ', ai)
if suffix is None:
beta = [ai]
support_beta = T.get_item_support(ai)
else:
beta = make_pattern(suffix, ai)
support_beta = min(support_suffix, T.get_item_support(ai))
logging.debug('BETA is %s', ' '.join(beta))
logging.debug('Support of beta is %s ', str(support_beta))
if support_beta >= MIN_SUPPORT and support_beta / T.max_support(beta) >= MIN_ALL_CONF:
print ("FINAL BETA ", ' '.join(beta))
all_conf = support_beta / T.max_support(beta)
logging.debug('All conf for beta is %s ', str(all_conf))
# get support range for ai
support_limit = max(T.get_item_support(ai) / MIN_ALL_CONF, MIN_SUPPORT)
logging.debug('Support limit for beta is %s ', str(support_limit))
all_prefixes = get_prefixes(T.header[ai])
logging.debug('All prefixes for header element %s is ', ai)
logging.debug(all_prefixes)
beta_tree = Tree()
for prefix in all_prefixes:
# check support condition for each item in prefix path
check_passed = []
for item in prefix:
# the prefix contains ai as well
if item[0] == ai:
continue
if T.get_item_support(item[0]) <= support_limit:
check_passed.append(item)
else:
break
# add the temporary branch in the beta projected database
check_passed.reverse()
print('\nTemp branch for prefix ', prefix)
print(check_passed)
if len(check_passed) > 0:
logging.debug('Inserting %s in beta tree ', ' '.join([c[0] for c in check_passed]))
beta_tree.insert_transaction([c[0] for c in check_passed], nc=check_passed[0][1]) # considering check passed has tuples
beta_tree.merge_tree()
logging.debug('Beta tree formed for %s is ', ' '.join(beta))
logging.debug(beta_tree)
beta_tree.print()
for node in beta_tree.get_all_nodes():
if node.item is None:
continue
print ('DEBUG: ', ' '.join(beta), str(node.item))
pattern = make_pattern(beta, node.item)
# pattern = list(set(beta).union(set(node.item)))
print ('DEBUG: ', ' '.join(pattern))
pattern_support = min(support_beta, beta_tree.get_item_support(node.item))
max_support = T.max_support(pattern)
logging.debug('DEBUG: max support for %s is %s', ' '.join(pattern), str(max_support))
if pattern_support < MIN_SUPPORT:
# delete this node
print('DEBUG: calling to delete node')
print (node.item)
beta_tree.delete_node(node)
elif pattern_support / max_support < MIN_ALL_CONF:
# delete this node
print('DEBUG: calling to delete node')
print(node.item)
beta_tree.delete_node(node)
else:
print ('PATTERN HERE ', ' '.join(pattern))
logging.debug('Final beta tree after node deletion is ')
logging.debug(beta_tree)
beta_tree.print()
print(beta_tree.header)
# call if more than one element in fp tree
if beta_tree.count_nodes() > 1:
cominek(beta_tree, beta, len(beta)+1, data=data, support_suffix=support_beta)
if __name__ == '__main__':
# Write code for testing here
pass
```
#### File: COMINE-Plusplus-Algorithm-for-Finding-Correlated-Patterns-from-Transactions/src/data.py
```python
from collections import OrderedDict
class Data(object):
def __init__(self):
self.supports = OrderedDict()
self.transactions = []
def read_data(self, filename):
with open(filename, 'r') as fin:
for trans in fin.readlines():
t = trans.strip().split(';')
self.transactions.append(t)
# find support
temp_supports = {}
for trans in self.transactions:
for item in trans:
if item in temp_supports:
temp_supports[item] += 1
else:
temp_supports[item] = 1
sorted_item_tuples = sorted(temp_supports.items(), key=lambda item: item[1], reverse=True)
for (item, count) in sorted_item_tuples:
self.supports[item] = count
```
#### File: COMINE-Plusplus-Algorithm-for-Finding-Correlated-Patterns-from-Transactions/src/get_frequent_items.py
```python
from collections import OrderedDict
def get_frequent_item_count_dict(fin):
item_count_dict = {}
# 2-d array to store data
data = []
for trans in fin.readlines():
t = trans.strip().split(';')
data.append(t)
"""
transaction -> a c b
data_dict = {
'a': 2,
'b': 3,
'c': 1
}
"""
# find support
for trans in data:
for item in trans:
if item in item_count_dict:
item_count_dict[item] += 1
else:
item_count_dict[item] = 1
sorted_item_tuples = sorted(item_count_dict.items(), key=lambda item: item[1], reverse=True)
result_dict = OrderedDict() # dictionary with key: item_name, value: count. Sorted by count
for (item, count) in sorted_item_tuples:
result_dict[item] = count
return result_dict, data
```
#### File: COMINE-Plusplus-Algorithm-for-Finding-Correlated-Patterns-from-Transactions/src/tree.py
```python
from collections import OrderedDict
import queue
import logging
class IdGenerator:
"""
The ids have many to 1 relationship with the itemnames.
"""
def __init__(self):
self.id_count = 0
self.id_to_node = {}
def get_new_id(self, node):
self.id_count += 1
self.id_to_node[self.id_count] = node
return self.id_count
def get_node_for_id(self, id):
try:
return self.id_to_node[id]
except KeyError:
raise ValueError("This id is not valid, " + str(id))
class Node:
def __init__(self, item_name=None, id=None, children=None, nc=1, id_gen=None):
if children is None:
children = {}
self.children = children
self.item = item_name
if id_gen is not None:
self.id = id_gen.get_new_id(self)
else:
self.id = id
self.parent = None
self.node_count = nc
def add_child(self, node):
"""
Add child `node` to the current node
"""
self.children[node.item] = node
node.parent = self
class Tree:
def __init__(self):
self.root = Node() # root node
self.header = {} # list of nodes where each branch of that item starts, key=item_name, value=list of nodes
self.id_generator = IdGenerator() # id generator for this tree
def get_header_sorted(self):
# returns header sorted by item support counts
it = sorted(self.header.items(), key=lambda v: sum([n.node_count for n in v[1]]), reverse=True)
return it
def get_item_support(self, item):
sup = 0
if item not in self.header:
# raise ValueError("The item passed does not belong to this tree, " + str(item))
return 0 # item does not occur in this tree, so its support is 0
for node in self.header[item]:
sup += node.node_count
return sup
def get_support(self, item, suffix):
total_support = 0
for node in self.header[item]:
current = self.find_node_in_children(node, suffix[0])
if current is None:
continue
cur_support = min(node.node_count, current.node_count)
for nj in suffix[1:]:
for child in current.children:
if nj == child:
current = current.children[child]
cur_support = min(cur_support, current.node_count)
else:
raise ValueError("Suffix is not present in children. This should not happen, " + child.item)
total_support += cur_support
return total_support
def max_support(self, pattern):
max_support = -1
for item in pattern:
max_support = max(max_support, self.get_item_support(item))
return max_support
def find_node_in_children(self, node, item):
if node.item == item:
return node
if len(node.children) == 0:
return None
for n in node.children:
result = self.find_node_in_children(node.children[n], item)
if result is not None: # propagate the first match found in any subtree
return result
return None
def insert_transaction(self, trans, nc=1):
current = self.root
for t in range(len(trans)):
if trans[t] not in current.children:
new_child = Node(trans[t], nc=nc, id_gen=self.id_generator)
current.add_child(new_child)
if trans[t] in self.header:
self.header[trans[t]].append(new_child)
else:
self.header[trans[t]] = [new_child]
current = new_child
else:
current = current.children[trans[t]]
current.node_count += nc
def merge_tree(self):
new_level = []
for node in self.root.children:
new_level.append(self.root.children[node])
prev_level = self.root
while len(new_level) > 0:
# apply merge at this level
sorted_level = sorted(new_level, key=lambda k: k.item)
cidx = 0
merged_level = []
while cidx < len(sorted_level) - 1:
if sorted_level[cidx].item != sorted_level[cidx + 1].item:
merged_level.append(sorted_level[cidx])
cidx += 1
continue
else:
# modify header
self.header[sorted_level[cidx].item].remove(sorted_level[cidx])
self.header[sorted_level[cidx].item].remove(sorted_level[cidx + 1])
for child in sorted_level[cidx].children:
if child in sorted_level[cidx + 1].children:
sorted_level[cidx + 1].children[child].node_count += sorted_level[cidx].children[
child].node_count
else:
sorted_level[cidx + 1].children[child] = sorted_level[cidx].children[child]
# del prev_level.children[sorted_level[cidx].item]
sorted_level[cidx + 1].node_count += sorted_level[cidx].node_count
self.header[sorted_level[cidx + 1].item].append(sorted_level[cidx + 1])
cidx += 1
new_level = []
for node in merged_level:
for sn in node.children:
new_level.append(node.children[sn])
def print(self):
q = queue.Queue()
q.put(self.root)
while not q.empty():
node = q.get()
# print(node)
if node.item is not None:
print(node.item + ' : ' + ' '.join(node.children))
for n in node.children:
q.put(node.children[n])
def contains(self, item, node_ids):
for node in node_ids:
if self.node_to_item[node] == item:
return True, node
return False, None
def get_all_nodes(self):
result = []
q = queue.Queue()
q.put(self.root)
while not q.empty():
node = q.get()
result.append(node)
for n in node.children:
q.put(node.children[n])
return result
def delete_node(self, node):
this_parent = node.parent
print('DEBUG: this parent')
print(this_parent == self.root)
print(this_parent.children)
print(this_parent.item)
if this_parent is None:
pass
else:
# this is a hacky way to handle the case where children A - B C and children B - C, and we want to delete B.
try:
del this_parent.children[node.item]
except KeyError:
pass
for n in node.children:
node.children[n].parent = this_parent
# this is a hacky way to handle the case where children A - B C and children B - C, and we want to delete B.
if node.children[n].item not in this_parent.children:
this_parent.children[node.children[n].item] = node.children[n]
def count_nodes(self):
return len(self.get_all_nodes())
``` |
{
"source": "22PoojaGaur/Efficient_Premiumness_And_Utilitybased_Itemset_Placement_Scheme_for_retail_stores",
"score": 3
} |
#### File: Efficient_Premiumness_And_Utilitybased_Itemset_Placement_Scheme_for_retail_stores/src/ARC.py
```python
def get_arc(data):
arc = {}
for itemset in data.keys():
for item in itemset:
if item not in arc:
arc[item] = 0
arc[item] += (data[itemset][0] * data[itemset][1]) / len(itemset)
return arc
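# Illustrative usage (made-up values): with data = {('a', 'b'): (10, 2.0)}, each of 'a' and 'b'
# is credited (10 * 2.0) / 2 = 10.0, i.e. an itemset's revenue (support * price) is split
# equally among its items and accumulated per item over all itemsets.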
```
#### File: Efficient_Premiumness_And_Utilitybased_Itemset_Placement_Scheme_for_retail_stores/src/data_parser.py
```python
import pyfpgrowth
import math
import random
# Set a very low threshold to get all itemsets
SUPPORT_THRESH = 50
K_FOR_KUI_IDX = 4
PRICE_BRACKETS = [
(0.01, 0.16), (0.17, 0.33), (0.34, 0.50), (0.51, 0.67),
(0.68, 0.84), (0.85, 1)]
def nonblank_lines(f):
for l in f:
line = l.rstrip()
if line:
yield line
def parse_data(data_file_name):
transactions = []
with open(data_file_name, 'r') as f:
for line in nonblank_lines(f):
transactions.append(line)
transactions = [[int(item) for item in transaction.strip().split(' ')] for transaction in transactions]
patterns = pyfpgrowth.find_frequent_patterns(transactions, SUPPORT_THRESH)
data = {}
# print (patterns)
for (itemset, support) in patterns.items():
if len(itemset) > K_FOR_KUI_IDX:
continue
price_idx = int(math.floor(5.5*random.random()))
price = random.randint(
int(100*PRICE_BRACKETS[price_idx][0]), int(100*PRICE_BRACKETS[price_idx][1]))/100.0
data[itemset] = (support, price)
return data
```
#### File: Efficient_Premiumness_And_Utilitybased_Itemset_Placement_Scheme_for_retail_stores/src/kui_index.py
```python
K_FOR_KUI_IDX = 4
def get_kui_index(data):
'''
data - dictionary input for data.
key -> tuple of itemsets
value -> tuple of (support, price)
'''
kui = {}
# Initialize all levels for dict
for i in range(1, K_FOR_KUI_IDX+1):
kui[i] = []
# Insert all itemsets in kui
for itemset in data.keys():
value = list([itemset]) + list(data[itemset]) + [data[itemset][0] * data[itemset][1]]
level = len(itemset)
if (len(itemset) > K_FOR_KUI_IDX):
continue
# use insertion sort to maintain sorted order at level
for i in range(0, len(kui[level])):
if value[-1] < kui[level][i][-1]:
kui[level].insert(i, tuple(value))
break
else:
kui[level].append(tuple(value))
return kui
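# Illustrative shape of the returned index (made-up values):
#   kui[2] = [((1, 3), 60, 0.25, 15.0), ((2, 5), 80, 0.40, 32.0)]
# i.e. level k holds the k-sized itemsets as (itemset, support, price, support*price) tuples,
# kept in ascending order of the revenue term by the insertion sort above.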
```
#### File: Efficient_Premiumness_And_Utilitybased_Itemset_Placement_Scheme_for_retail_stores/src/slots.py
```python
import random
# NOTE: not using zipf right now.
def get_slots(num_slots, type_slots, zipf):
slots = []
premiumness_idx = {}
start = 0.01
for i in range(1, type_slots+1):
premiumness_idx[i-1] = (start, start+(1/(type_slots)))
slots.append([])
for _ in range(0, int(num_slots/type_slots)):
slots[i-1].append(0)
else:
rem = num_slots - type_slots*int(num_slots/type_slots)
for k in range(0, rem):
slots[-1].append(0)
return slots
``` |
{
"source": "22PoojaGaur/Improving-Diversity-Performance-of-Association-Rule-Based-Recommender-Systems",
"score": 3
} |
#### File: Improving-Diversity-Performance-of-Association-Rule-Based-Recommender-Systems/src/calculating_drank_for_tesco_dataset.py
```python
from __future__ import division
import sys
import extract_frequent_pattern
def calculate_nodes_in_projection(patterns, ch_dict=None):
if ch_dict is None:
raise ValueError
nodes_in_proj = set()
for item in patterns:
path = set(ch_dict[item])
# print [node.value for node in path]
nodes_in_proj = nodes_in_proj | path
return len(nodes_in_proj)
def calculate_drank(patterns, ch_dict=None, ch_height=None):
if ch_dict is None or ch_height is None:
raise ValueError
num_node_in_proj = calculate_nodes_in_projection(patterns, ch_dict=ch_dict)
num_item_in_pat = len(patterns)
h = ch_height
try:
drank = (num_node_in_proj - (num_item_in_pat + h - 1)) / (
(h-1)*(num_item_in_pat -1))
except ZeroDivisionError as e:
drank = "NOT PROCESSED"
return drank
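# Illustrative check of the formula (hypothetical numbers): with hierarchy height h = 5 and a
# 3-item pattern whose item paths cover 10 distinct concept-hierarchy nodes,
# drank = (10 - (3 + 5 - 1)) / ((5 - 1) * (3 - 1)) = 3 / 8 = 0.375.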
def read_input():
if len(sys.argv) < 3:
print("program expects name of input file and pattern file")
sys.exit(1)
try:
in_filname = sys.argv[1]
pat_filename = sys.argv[2]
fin = open(in_filname, 'r')
fpat = open(pat_filename, 'r')
except:
print("file not found")
sys.exit(1)
item_path_dict = {}
is_item = True
cur_item = ''
path = []
for line in fin.readlines():
path = []
if line.strip() == '':
continue
if is_item:
# line is item
item = line.strip()
cur_item = item
is_item = 0
else:
# line is path
path = line.strip().split(' ')
item_path_dict[cur_item] = path
is_item = 1
# threshold randomly decided
freq_patterns =extract_frequent_pattern.extract_frequent_patterns(fpat, 18)
return (item_path_dict, freq_patterns)
def main():
(item_path_dict, freq_patterns) = read_input()
ch_height = 0
for item in item_path_dict.keys():
if len(item_path_dict[item]) > ch_height:
ch_height = len(item_path_dict[item])
print("concept hierarchy height")
print (ch_height)
dranks = [0]*len(freq_patterns)
idx = 0
for patterns in freq_patterns:
print("processing pattern %s", ' '.join(patterns), '\n')
try:
# calculate drank
dranks[idx] = calculate_drank(patterns, ch_dict=item_path_dict, ch_height=ch_height)
except KeyboardInterrupt:
dranks[idx] = "SKIPPED"
print(str(dranks[idx]), '\n')
idx += 1
# convert patterns to list
fin_patterns = []
for i in freq_patterns:
fin_patterns.append(' '.join(i))
# Now we have drank for evey frequent pattern.
for i in range(0, len(freq_patterns)):
print("patterns ", str(i), "- ",)
print(fin_patterns[i])
print("drank - ")
print(dranks[i])
print('\n')
if __name__ == '__main__':
main()
```
#### File: Improving-Diversity-Performance-of-Association-Rule-Based-Recommender-Systems/src/make_recommendations.py
```python
def get_recommendation(X, rule_inter):
result = []
for i in rule_inter:
if set(i.split('=>')[0].strip().split(' ')).issubset(set(X)):
result.append(i)
recs = set()
for i in result:
if len(i.split('=>')[1].strip().split(' ')) == 1:
# print(i.split('=>')[1].strip())
recs.add(i.split('=>')[1].strip())
return (result, list(recs))
if __name__ == '__main__':
inp = input('enter input file for recommendations -> ')
Xs = open(inp, 'r').read().split('\n')
ass_c = open('AR_sorted_by_confidence.txt', 'r').read().split('\n')
ass_d = open('AR_sorted_by_drank.txt', 'r').read().split('\n')
# store the intersection of rules of both sorted list
rule_inter = []
rule_conf_dict = {}
rule_drank_dict = {}
fin_dict = {}
for i in ass_c:
rule_conf_dict[i.split('|')[0].strip()] = i.split('|')[1].strip()
for i in ass_d:
rule_drank_dict[i.split('|')[0].strip()] = i.split('|')[1].strip()
sorted_conf_rules = []
for rule, conf in sorted(rule_conf_dict.items(), key=lambda x: x[1], reverse=True):
sorted_conf_rules.append(rule)
sorted_drank_rules = []
for rule, drank in sorted(rule_drank_dict.items(), key=lambda x: x[1], reverse=True):
# put threshold for drank here
thresh= 0.05
assert thresh != None
if float(drank) >= thresh:
sorted_drank_rules.append(rule)
rule_inter = list(set(sorted_conf_rules) & set(sorted_drank_rules))
# for rule, conf in rule_conf_dict.items():
# fin_dict[rule] = (conf, rule_drank_dict[rule])
# for key, value in sorted(fin_dict.items(), key=lambda x: x[1], reverse=True):
# rule_inter.append(key)
#print(rule_inter)
# rule_inter = list(set(rule_conf) & set(rule_drank))
for x in Xs:
if x == '':
continue
# print(x)
(rules, recs) = get_recommendation(x.strip().split(' '), rule_inter)
# print('rules for ' + x + ' -> ' + '\n'.join(rules))
print('recommendations for HC-HD ' + x + ' -> ' + ' '.join(recs))
print('\n')
(rules, recs) = get_recommendation(x.strip().split(' '), rule_conf_dict.keys())
print('recommendations for HC ' + x + ' -> ' + ' '.join(recs))
print('\n')
``` |
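A self-contained sketch of the rule-matching idea in `get_recommendation`, using hypothetical rules in the same "antecedent => consequent" text format the script expects: a rule fires when its antecedent is a subset of the basket, and single-item consequents become recommendations.
```python
# Toy association rules; none of these items come from the original dataset.
rules = ["milk bread => butter", "milk => eggs", "tea sugar => milk"]
basket = ["milk", "bread"]

recommendations = set()
for rule in rules:
    antecedent = set(rule.split('=>')[0].strip().split(' '))
    consequent = rule.split('=>')[1].strip().split(' ')
    if antecedent.issubset(set(basket)) and len(consequent) == 1:
        recommendations.add(consequent[0])

print(recommendations)  # {'butter', 'eggs'} (order may vary)
```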
{
"source": "22preich/BlenderNetworkRender",
"score": 2
} |
#### File: 22preich/BlenderNetworkRender/render.py
```python
import bpy
import sys
dir = "C:/Users/foggy/Appdata/roaming/python37/site-packages"
sys.path.append(dir)
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
import eventlet
from eventlet import wsgi
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template("index.html")
@app.route('/submit', methods=['POST'])
def submit():
f = request.files['file']
filepath = 'tmp/' + secure_filename(f.filename)
f.save(filepath)
render(filepath)
return "<img src='/static/" + name_from_file(filepath) + ".png'>"
def render(filepath="tmp/alley.blend"):
bpy.ops.wm.open_mainfile(filepath=filepath)
#print("yay")
bpy.context.scene.cycles.device = 'GPU'
scene = bpy.context.scene
scene.render.image_settings.file_format = 'PNG'
scene.render.filepath = "C:/Users/foggy/Documents/Github/BlenderNetworkRender/static/" + name_from_file(filepath) + ".png"
print(bpy.ops.render.render(write_still=1))
def name_from_file(filename):
return filename.split("/").pop().split(".")[0]
# render()
#app.run("0.0.0.0", 5000)
wsgi.server(eventlet.listen(('', 8000)), app)
``` |
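For completeness, a hypothetical client-side call to the `/submit` endpoint above; the host, port and file path are assumptions for illustration, not part of the original repo (the server itself listens on port 8000 via eventlet).
```python
# Hypothetical client: upload a .blend file to the Flask endpoint above and
# print the returned HTML snippet pointing at the rendered PNG.
import requests

with open("scene.blend", "rb") as f:  # placeholder path
    resp = requests.post("http://localhost:8000/submit", files={"file": f})
print(resp.status_code)
print(resp.text)  # e.g. "<img src='/static/scene.png'>"
```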
{
"source": "22preich/neural-interface",
"score": 3
} |
#### File: 22preich/neural-interface/text.py
```python
import sched, time
s = sched.scheduler(time.time, time.sleep)
def print_time():
print("From something", time.time())
def print_some_times():
print(time.time())
s.enter(5, 1, print_time, ())
s.enter(10, 1, print_time, ())
s.run()
print(time.time())
print_some_times()
``` |
{
"source": "22RC/eBPF-TcpLatency-NetPagefault",
"score": 2
} |
#### File: eBPF-TcpLatency-NetPagefault/NetPagefault/NetPagefault.py
```python
from __future__ import print_function
from bcc import BPF, PerfType , PerfSWConfig
from socket import inet_ntop, ntohs, AF_INET
from struct import pack
import signal
import socket
import argparse
import ctypes as ct
from time import sleep
# arguments
examples = """examples:
./NetPagefault.py # trace all TCP/UDP accept()s, connect()s,
default running 1000 seconds
./NetPagefault.py -t 100 # trace all TCP/UDP accept()s, connect()s,
running for 100 seconds
"""
parser = argparse.ArgumentParser(
description="Trace TCP/UDP socket connects, accepts and process pagefaults",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--time",
help="trace for this seconds")
args = parser.parse_args()
def signal_ignore(signum,frame):
print()
def get_key(list_of_keys,pid,comm):
i = -1
index = 0
while index < len(list_of_keys) and i == -1:
if list_of_keys[index][0] == pid and list_of_keys[index][2] == comm :
i = index
index += 1
return i
########################### eBPF programs #######################################
bT = BPF(src_file="source_TCPtracer.c")
bU= BPF(src_file="source_UDPtracer.c")
########################### TCP probe Connect ###################################
bT.attach_kprobe(event="tcp_v4_connect", fn_name="trace_connect_entryTCP")
bT.attach_kretprobe(event="tcp_v4_connect", fn_name="trace_tcp_connect")
############################ TCP/UDP probe Accept ###############################
bT.attach_kretprobe(event='inet_csk_accept',fn_name="trace_tcp_accept")
bU.attach_kretprobe(event="udp_recvmsg",fn_name="trace_udp_rcv")
########################### UDP probe Connect ###################################
bU.attach_kprobe(event="udp_sendmsg",fn_name="trace_connect_entryUDP")
bU.attach_kretprobe(event="udp_sendmsg", fn_name="trace_udp_connect")
########################### Pagefault probe #####################################
f = BPF(src_file="source_Procfault.c")
f.attach_perf_event(
ev_type=PerfType.SOFTWARE, ev_config=PerfSWConfig.PAGE_FAULTS_MIN,
fn_name="page_min_flt",sample_period=0,sample_freq=49)
#################################################################################
if args.time:
print("Running for {} seconds or hit Ctrl-C to end.".format(args.time))
timeRunning = args.time
else:
print("Running for {} seconds or hit Ctrl-C to end.".format(1000))
timeRunning = 1000
try:
sleep(float(timeRunning))
except KeyboardInterrupt:
signal.signal(signal.SIGINT, signal_ignore)
print(" ")
TASK_COMM_LEN = 16 # linux/sched.h
min_flt_count = {} # process page fault dict
tcp_map_v4_connect = {} # tcp client side
udp_map_v4_connect = {} # udp client side
tcp_map_v4_accept = {} # tcp server_side
udp_map_v4_accept = {} # udp server_side
#TCP stats socket connect (client_side)
print("\n")
print('Tcp client side (sock.connect) :\n')
print('{:<5s} {:<5s} {:<18s} {:<15s} {:<15s} {:<5s}'.format("PID","IPVER","PROC_NAME","IPSOURCE","IPDEST","DPORT"))
for (k,v) in bT.get_table("ipv4_map_client").items():
tcp_map_v4_connect[(k.pid, 4, k.comm, k.saddr, k.daddr, k.dport)] = v.value
keyslist = list(tcp_map_v4_connect.keys())  # list() so it can be indexed under Python 3
for i in keyslist:
print('{:<5d} {:<5d} {:<18s} {:<15s} {:<15s} {:<5d}'.format(i[0],i[1],i[2],
inet_ntop(AF_INET,pack("I",i[3])), inet_ntop(AF_INET,pack("I",i[4])),i[5]))
#TCP stats socket accept (server_side)
print(" \n")
print('Tcp server side (sock.accept) :\n')
print('{:<5s} {:<5s} {:<18s} {:<15s} {:<15s} {:<5s}'.format("PID","IPVER","PROC_NAME","IPSOURCE","IPDEST","LPORT"))
for (k,v) in bT.get_table("ipv4_map_serverTCP").items():
tcp_map_v4_accept[(k.pid, 4, k.comm, k.saddr, k.daddr, k.lport)] = v.value
keyslist_acceptTCP = list(tcp_map_v4_accept.keys())
for i in keyslist_acceptTCP:
print('{:<5d} {:<5d} {:<18s} {:<15s} {:<15s} {:<5d}'.format(i[0],i[1],i[2],
inet_ntop(AF_INET,pack("I",i[3])), inet_ntop(AF_INET,pack("I",i[4])),i[5]))
#UDP stats socket accept (server_side)
print(" \n")
print('Udp server side (sock.accept) :\n')
print('{:<5s} {:<5s} {:<18s} {:<15s} {:<15s} {:<5s} '.format("PID","IPVER","PROC_NAME","IPSOURCE","IPDEST","LPORT"))
for (k,v) in bU.get_table("ipv4_map_serverUDP").items():
udp_map_v4_accept[(k.pid, 4, k.comm, k.saddr, k.daddr, k.lport)] = v.value
keyslist_acceptUDP = list(udp_map_v4_accept.keys())
for i in keyslist_acceptUDP:
print('{:<5d} {:<5d} {:<18s} {:<15s} {:<15s} {:<5d} '.format(i[0],i[1],i[2],
inet_ntop(AF_INET,pack("I",i[3])), inet_ntop(AF_INET,pack("I",i[4])),i[5]))
#UDP stats socket connect (client_side)
print(" \n")
print('Udp client side (sock.connect) :\n')
print('{:<5s} {:<5s} {:<18s} {:<15s} {:<15s} {:<5s}'.format("PID","IPVER","PROC_NAME","IPSOURCE","IPDEST","DPORT"))
for (k,v) in bU.get_table("ipv4_map_client_UDP").items():
udp_map_v4_connect[(k.pid, 4, k.comm, k.saddr, k.daddr, k.dport)] = v.value
keyslist_connectUDP = list(udp_map_v4_connect.keys())
for i in keyslist_connectUDP:
print('{:<5d} {:<5d} {:<18s} {:<15s} {:<15s} {:<5d}'.format(i[0],i[1],i[2],
inet_ntop(AF_INET,pack("I",i[3])), inet_ntop(AF_INET,pack("I",i[4])),i[5]))
print("\n")
for (k, v) in f.get_table('min_flt_table').items():
idx_key = get_key(keyslist, k.pid , k.comm)
version = 0
if idx_key >= 0:
keysTuple = keyslist[idx_key]
saddr, daddr = keysTuple[3] , keysTuple[4]
version = 4
else:
idx_key = get_key(keyslist_acceptTCP, k.pid , k.comm)
version = 0
if idx_key >= 0:
keysTuple = keyslist_acceptTCP[idx_key]
saddr, daddr = keysTuple[3] , keysTuple[4]
version = 4
else:
idx_key = get_key(keyslist_acceptUDP, k.pid , k.comm)
version = 0
if idx_key >= 0:
keysTuple = keyslist_acceptUDP[idx_key]
saddr, daddr = keysTuple[3] , keysTuple[4]
version = 4
if version != 0:
min_flt_count[(k.pid, k.comm)] = (v.value,version,saddr,daddr)
print("Page fault: \n")
print('{:<8s} {:<18s} {:<10s} {:<12s}'.format("PID","NAME","IPVER","MIN_FLT"))
for (k, v) in sorted(f.get_table('min_flt_table').items(), key=lambda kv: kv[1].value, reverse=True):
try:
(value,version,sa,da) = min_flt_count[(k.pid, k.comm)]
except KeyError:
version = 0
if version != 0:
print('{:<8d} {:<18s} {:<10d} {:<12d}'.format(
k.pid, k.comm.decode(), version,
value))
``` |
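The script above repeatedly converts the kernel's packed 32-bit addresses with `pack("I", ...)` and `inet_ntop`; a standalone illustration of that idiom follows (the integer is a made-up value).
```python
# Convert a 32-bit integer (as stored in the eBPF maps above) into a
# dotted-quad string. 0x0100007f packs to 127.0.0.1 on little-endian hosts.
from socket import inet_ntop, AF_INET
from struct import pack

addr_u32 = 0x0100007f  # hypothetical value read from a BPF map
print(inet_ntop(AF_INET, pack("I", addr_u32)))  # 127.0.0.1 (little-endian)
```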
{
"source": "22shubh22/online-judge",
"score": 2
} |
#### File: judge/submission/task.py
```python
from celery import shared_task
from .models import Submission, SubmissionTest
from question.models import Question, Test
import sys
import json
@shared_task
def queue_submission(submission_id):
submission = Submission.objects.get(id=submission_id)
submission.status = "processing"
submission.save()
code_file_path = submission.code_file.path
code_file_name = submission.code_file.name
filename = code_file_name.split("/")[1].split(".")[0]
try:
import importlib.util
spec = importlib.util.spec_from_file_location("code." + str(filename), str(code_file_path))
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
# pass parameters for submit, from test file of question
temp_submission_status = "AC"
for test_obj in Test.objects.filter(question=submission.question.id):
input_test = json.loads(test_obj.test)
print(input_test)
print(type(input_test))
# run the code
output = foo.submit(*input_test)
# create a submission test object
submit_test = SubmissionTest.objects.create(test=test_obj, submission=submission, output=json.dumps(output))
if submit_test.output == test_obj.correct_answer:
submit_test.status = "AC"
submit_test.save()
elif submit_test.output != "" and submit_test.output != test_obj.correct_answer:
submit_test.status = "WA"
submit_test.save()
temp_submittion_status = "WA"
submission.status = temp_submission_status
submission.save()
return "No run time error"
except Exception as e:
error = str(e)
print("run time error")
print(error)
return error
``` |
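The judge loads user code dynamically via `importlib`; here is a minimal, self-contained sketch of that pattern on its own (the module name, path and arguments are hypothetical placeholders).
```python
# Minimal sketch of loading a module from an arbitrary file path and calling
# a function it defines, mirroring queue_submission above.
import importlib.util

def load_and_run(path, func_name, *args):
    spec = importlib.util.spec_from_file_location("submitted_code", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)            # executes the user's file
    return getattr(module, func_name)(*args)   # e.g. call submit(*inputs)

# Usage (path and arguments are placeholders):
# result = load_and_run("/tmp/solution.py", "submit", 1, 2)
```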
{
"source": "22subash/Redbot",
"score": 3
} |
#### File: 22subash/Redbot/discordbot.py
```python
import discord
from discord.ext import commands
import asyncpraw
# Getting things from reddit
reddit = asyncpraw.Reddit(client_id='client_id',
client_secret='client_secret',
user_agent='user_agent')
# Discord bot stuff
client = discord.Client()
@client.event
# If user types "/get"
async def on_message(message):
if message.content.startswith("/get"):
sort = message.content[5:6]
redditval = message.content[7:]
print(sort)
print(redditval)
await message.channel.send("Please enter a valid subreddit and sort by, otherwise nothing will appear")
submission = await reddit.subreddit(redditval)
if (sort == "h"):
sort = "hour"
elif (sort == "d"):
sort = "day"
elif (sort == "w"):
sort = "week"
elif (sort == "m"):
sort = "month"
elif (sort == "y"):
sort = "year"
elif (sort == "a"):
sort = "all"
submissions = await reddit.subreddit(redditval)
posts = submissions.top(time_filter=sort, limit=1)
async for post in posts:
url = post.url
if (url.endswith(('.jpg', '.png', '.gif', '.jpeg'))):
embed = discord.Embed(title= post.title)
embed.set_image(url=post.url)
await message.channel.send(embed=embed)
# If user types "/help"
if (message.content == ("/help")):
embed = discord.Embed(title= "Commands", discription= "Below are the commands that you can use with this bot", color=discord.Color(15105570))
embed.set_footer(text="Please use (.) before each command")
embed.add_field(name=".get (enter the way you want to sort by) (enter a subreddit name)", value="Gets 1 picture from the way you want to sort of your chosen subreddit", inline=True)
await message.channel.send(embed=embed)
client.run(TOKEN)
``` |
{
"source": "22th/SDD-Major-Work",
"score": 3
} |
#### File: 22th/SDD-Major-Work/LogicPuzzler.py
```python
import pygame as py
import random as rand
import eztext
import numpy as np
py.init()
#vars below
class Images:
image=[]
x=0
y=0
img_count=0
img_selected=False
correct=False
class Scores:
NS=[]
screen_width = 1000
screen_height = 700
screen = py.display.set_mode((screen_width,screen_height))
lock = py.time.Clock()
myfont = py.font.SysFont("Comic Sans MS",11)
yfont = py.font.SysFont("Comic Sans MS",20)
running=True
txtbx = eztext.Input(maxlength=10, color=(255,255,255), prompt='Name: ')
startbutt=py.image.load("startbutton.png")
SL=0
py.display.set_caption("Logic Puzzler")
RED = (255,0,0)
BLUE = (0,0,255)
GREEN = (0,255,0)
BLACK = (0,0,0)
WHITE = (255,255,255)
ORANGE = (255,180,0)
prevclickedRes=0
prevclickedPlay=0
GreyBox=py.image.load("gray.jpg")
GreyBox=py.transform.scale(GreyBox,(50,50))
prevclickedResImg=0
Correctspot=[]
score=0
name=""
errorcount=[0,False]
for i in range(25):
Correctspot.append(False)
#vars above
def screenMsg(screen,x,y,font,text,color):
text=str(text)
xv=font.render(text,1,color)
screen.blit(xv,(x,y))
def rectbutton(screen,x,y,w,h,color,events,text,font,fcolor):
xv=font.render(text,1,fcolor)
py.draw.rect(screen,color,(x,y,w,h))
rect=py.Rect(x,y,w,h)
screen.blit(xv,(x+w/2-py.Surface.get_width(xv)/2,y+h/2-py.Surface.get_height(xv)/2))
for evet in events:
if evet.type == py.MOUSEBUTTONUP:
if rect.collidepoint((py.mouse.get_pos())):
return(True)
def imgbutton(screen,img,x,y,events):
screen.blit(img,(x,y))
butt=py.Rect(x,y,img.get_width(),img.get_height())
for event in events:
if event.type == py.MOUSEBUTTONUP:
if butt.collidepoint((py.mouse.get_pos())):
return(True)
return(False)
def Selectlevel():
lock.tick(10)
events=py.event.get()
for event in events:
if event.type == py.QUIT:
running=False
py.display.quit()
screen.fill((50,50,50))
cou=0
for j in range(3):
for i in range(5):
cou=cou+1
click=rectbutton(screen,175+160*i,300+100*j,50,50,BLACK,events,str(cou),myfont,WHITE)
if click == True:
return(cou)
py.display.flip()
return(16)
def gamescreen(levelnum,LevImageRes,LevImagePlay):
lock.tick(100)
global prevclickedResImg
global prevclickedRes
global Correctspot
global score
events = py.event.get()
for event in events:
if event.type == py.QUIT:
running=False
screen.fill((50,50,50))
Playcount=0
tog1=False
for j in range(5):
for i in range(5):
if LevImagePlay[Playcount].img_selected == False:
tog1=imgbutton(screen,LevImagePlay[Playcount].image[LevImagePlay[Playcount].img_count],LevImagePlay[Playcount].x,LevImagePlay[Playcount].y,events)
if LevImagePlay[Playcount].img_selected == True:
tog1=imgbutton(screen,GreyBox,LevImagePlay[Playcount].x,LevImagePlay[Playcount].y,events)
if tog1 == True:
if LevImagePlay[Playcount].img_count == prevclickedResImg:
LevImagePlay[Playcount].img_selected = False
LevImageRes[prevclickedRes].correct=True
Correctspot[prevclickedRes]=True
score=score+10
else:
score=score-10
Playcount+=1
coun=0
tog=False
for j in range(5):
for i in range(5):
if LevImageRes[coun].img_selected == False:
tog=imgbutton(screen,LevImageRes[coun].image[LevImageRes[coun].img_count],LevImageRes[coun].x,LevImageRes[coun].y,events)
if LevImageRes[coun].img_selected == True or LevImageRes[coun].correct==True:
screen.blit(GreyBox,(LevImageRes[coun].x,LevImageRes[coun].y))
if tog == True:
LevImageRes[prevclickedRes].img_selected = False
prevclickedRes=coun
prevclickedResImg=LevImageRes[coun].img_count
LevImageRes[coun].img_selected = True
coun+=1
count=0
for j in range(5):
for i in range(5):
screen.blit(LevImagePlay[count].image[LevImagePlay[count].img_count],(LevImageRes[count].x+10,LevImageRes[count].y-300))
count+=1
screenMsg(screen,400,100,yfont,"LOGIC PUZZLER",ORANGE)
Scoremsg="Score: "+str(score)
Scoremsg=str(Scoremsg)
scomsg=myfont.render(Scoremsg,1,RED)
screen.blit(scomsg,(100,100))
py.display.flip()
score-=0.01
if all(Correctspot):
if prevclickedRes == 26:
return(score)
prevclickedRes=26
return(-100000000000)
def scorescreen(score,data):
lock.tick(10)
events=py.event.get()
for event in events:
if event.type == py.QUIT:
running=False
py.display.quit()
Scorerank=['5th: ','4th: ','3rd: ','2nd: ','1st: ']
screen.fill((50,50,50))
i=5
while i != 0:
temp=data[i-1]
screenMsg(screen,100,350-50*i,yfont,Scorerank[i-1]+str(temp[0]),RED)
screenMsg(screen,200,350-50*i,yfont,temp[1],RED)
i=i-1
py.display.flip()
def main():
global name
global errorcount
events=py.event.get()
for event in events:
if event.type == py.QUIT:
running=False
screen.fill((50,50,50))
select=False
lvnum=False
levnum=16
gs=False
gsnum=-100000000000
gsgo=False
txtbx.update(events)
# blit txtbx on the screen
txtbx.set_pos(100,10)
txtbx.draw(screen)
name=txtbx.value
start=imgbutton(screen,startbutt,350,100,events)
if start == True and name !='':
select=True
if start == True or errorcount[1]==True and name == '' and errorcount[0] !=100:
errorcount[1]=True
screenMsg(screen,screen_width/2,screen_height/2,yfont,"INPUT A NAME",ORANGE)
errorcount[0]+=1
if errorcount[0] == 1000 and errorcount[1] == True:
errorcount[0]=0
errorcount[1]=False
while select and levnum == 16:
for event in py.event.get():
if event.type == py.QUIT:
running=False
levnum=Selectlevel()
if levnum != 16:
imcount=[]
for i in range(25):
imcount.append(i)
rand.shuffle(imcount)
count=0
LevFilesLoc="Levels/"+str(levnum)+"/"
LevImageRes=[]
for j in range(5):
for i in range(5):
LevImagesTemp=Images()
img=py.image.load(LevFilesLoc +str(count+1) +".jpg").convert_alpha()
img=py.transform.scale(img,(50,50))
LevImagesTemp.img_count=imcount[count]
LevImagesTemp.image.append(img)
LevImagesTemp.x=i*51
LevImagesTemp.y=j*51+440
LevImagesTemp.img_selected=False
LevImageRes.append(LevImagesTemp)
count+=1
count=0
LevImagesPlay=[]
for j in range(5):
for i in range(5):
LevImagesTemp=Images()
img=py.image.load(LevFilesLoc +str(count+1) +".jpg").convert_alpha()
img=py.transform.scale(img,(50,50))
LevImagesTemp.img_count=count
LevImagesTemp.image.append(img)
LevImagesTemp.x=i*51+400
LevImagesTemp.y=j*51+440
LevImagesTemp.img_selected=True
LevImagesTemp.correct=False
LevImagesPlay.append(LevImagesTemp)
count+=1
lvnum = True
while lvnum:
gs=gamescreen(levnum,LevImageRes,LevImagesPlay)
if gs != -100000000000:
lvnum=False
gsgo=True
data = np.genfromtxt('Scores.csv', delimiter=',', dtype=None, names=('Scores','Name'),encoding=None)
newrow=(gs,name)
checker=0
for i in range(5):
temp=data[i]
if newrow[0] > temp[0] and checker == 0:
data[i]=newrow
checker+=1
data.sort(order='Scores')
np.savetxt("Scores.csv",data,fmt=('%i, %s'),delimiter=",")
while gsgo:
scorescreen(gs,data)
py.display.flip()
while running:
lock.tick(100)
events=py.event.get(py.QUIT)
for event in events:
if event.type == py.QUIT:
running=False
main()
#xkcc
``` |
{
"source": "22TonyFStark/mmsegmentation",
"score": 2
} |
#### File: models/backbones/mlpmixer.py
```python
import torch
import torch.nn as nn
from mmcv.cnn.bricks.transformer import FFN
from mmcv.cnn.bricks.drop import DropPath
from mmcv.runner import BaseModule, ModuleList
from ..builder import BACKBONES
from .vit import VisionTransformer
class MLP_Mixer_Layer(BaseModule):
"""
MLP_Mixer_Layer replaces the attention layer of a Transformer block with a
token-mixing MLP, while keeping LayerNorm for normalization.
It is a simple residual network that alternates:
1. a linear layer in which image PATCHES (tokens) interact, independently and
identically across channels,
2. a two-layer feed-forward network in which CHANNELS interact independently per
patch.
3. The difference from ResMLP is that ResMLP uses LayerScale and replaces the
LayerNorms with Affine layers, whereas this layer keeps plain LayerNorm.
"""
def __init__(self,
embed_dims,
mlp_ratio=4,
num_fcs=2,
drop=0.,
drop_path=0.,
act_cfg=dict(type='GELU'),
init_values=1e-4,
num_patches = 196):
super(MLP_Mixer_Layer, self).__init__()
self.norm1 = nn.LayerNorm(embed_dims)
self.attn = nn.Linear(num_patches, num_patches)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = nn.LayerNorm(embed_dims)
self.ffn = FFN(
embed_dims = embed_dims,
feedforward_channels = mlp_ratio*embed_dims,
num_fcs = num_fcs,
ffn_drop=drop,
dropout_layer=None,
act_cfg=act_cfg,
)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x).transpose(1,2)).transpose(1,2))
x = x + self.drop_path(self.ffn(self.norm2(x)))
return x
@BACKBONES.register_module()
class MLP_Mixer(VisionTransformer):
def __init__(self,
init_scale=1e-4,
img_size=(224, 224),
patch_size=16,
in_channels=3,
num_classes=150,
embed_dims=768,
depth=12,
drop_rate=0.,
act_cfg=dict(type="GELU"),
drop_path_rate=0.0,
**kwargs):
super(MLP_Mixer, self).__init__(
img_size=img_size,
patch_size=patch_size,
in_channels=in_channels,
num_classes=num_classes,
embed_dims=embed_dims,
depth=depth,
drop_rate=drop_rate,
act_cfg=act_cfg,
drop_path_rate=drop_path_rate,
**kwargs
)
num_patches = (img_size[0] // patch_size) * (img_size[1] // patch_size)
dpr = [drop_path_rate for i in range(depth)]
self.layers = ModuleList()
for i in range(depth):
self.layers.append(
MLP_Mixer_Layer(
embed_dims=embed_dims,
drop=drop_rate,
drop_path=dpr[i],
act_cfg=act_cfg,
init_values=init_scale,
num_patches=num_patches)
)
self.norm1 = nn.LayerNorm(embed_dims) # ViT uses LayerNorm
``` |
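As a dependency-free sanity check of the token-mixing step in `MLP_Mixer_Layer.forward` (no mmcv required), the core operation can be sketched in plain PyTorch; the batch size, patch count and embedding width below are illustrative only.
```python
# Plain-PyTorch sketch of token mixing: LayerNorm over channels, transpose so a
# Linear layer acts across the patch (token) dimension, then transpose back and
# add the residual, just as the forward() above does.
import torch
import torch.nn as nn

embed_dims, num_patches = 32, 49  # toy sizes
norm = nn.LayerNorm(embed_dims)
token_mix = nn.Linear(num_patches, num_patches)

x = torch.randn(2, num_patches, embed_dims)           # (batch, patches, channels)
mixed = token_mix(norm(x).transpose(1, 2)).transpose(1, 2)
out = x + mixed                                        # residual connection
print(out.shape)                                       # torch.Size([2, 49, 32])
```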
{
"source": "23132143252246477785366541342/djangoWebApp",
"score": 3
} |
#### File: djangoWebApp/hello/views.py
```python
from django.shortcuts import render
from django.views import View
from .models import Form
from django.http import HttpResponse
import sqlite3
# Create your views here.
class Index(View):
def get(self, request):
return render(request, "hello/index.html")
class SignUp(View):
def get(self, request):
return render(request, "hello/signup.html")
def post(self, request):
form = Form(request.POST)
if form.is_valid():
db = sqlite3.connect("accounts.db")
cur = db.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS accounts(username, password);")
cur.execute(f"INSERT INTO accounts(username, password) VALUES('{form.cleaned_data.get('username')}', '{form.cleaned_data.get('password')}');")
db.commit()
db.close()
return HttpResponse('''<h1 style='text-align:center;font-family:Arial;'>
Account Creation Succeeded!</h1>
<a href="/" style='font-family:Arial;'><p style='text-align:center;color:blue;'>--> Back to Home Page</p></a>''')
else:
return HttpResponse(f"{form}")
class Login(View):
def get(self, request):
return render(request, "hello/login.html")
def post(self, request):
return HttpResponse("<h1></h1>")
class ViewAccounts(View):
def get(self, request):
con = sqlite3.connect("accounts.db")
cur = con.cursor()
cur.execute("SELECT * FROM accounts")
select = cur.fetchall()
con.close()
return render(request, "hello/accounts.html", context={'select': select})
``` |
{
"source": "2320sharon/localtileserver",
"score": 2
} |
#### File: localtileserver/tileserver/__init__.py
```python
from flask import Flask
from localtileserver.tileserver import rest, urls, views
from localtileserver.tileserver.blueprint import cache, tileserver
from localtileserver.tileserver.data import get_data_path
from localtileserver.tileserver.palettes import get_palettes, palette_valid_or_raise
from localtileserver.tileserver.utilities import (
get_cache_dir,
get_clean_filename,
make_vsi,
purge_cache,
)
def create_app(url_prefix="/"):
app = Flask(__name__)
cache.init_app(app)
app.register_blueprint(tileserver, url_prefix=url_prefix)
return app
``` |
{
"source": "2320sharon/Seg2Map",
"score": 3
} |
#### File: Seg2Map/CoastSeg/make_overlapping_roi.py
```python
import geopandas as gpd
import numpy as np
import shapely
from shapely.geometry import LineString
from shapely.ops import unary_union
from geojson import Feature, FeatureCollection, dump
import geojson
from tqdm.notebook import tqdm_notebook
# Global vars
TEMP_FILENAME = "temp.geojson"
def get_empty_overlap_df(csv_name: "str"):
"""Creates an empty geodataframe to hold the overlapping ROIs information"""
df_overlap = gpd.GeoDataFrame({"id": [],
'primary_id': [],
'geometry': [],
'intersection_area': [],
'%_overlap': []})
df_overlap = df_overlap.astype({'id': 'int32', 'primary_id': 'int32'})
return df_overlap
def get_ROIs(coastline: dict, roi_filename: str, csv_filename: str):
"""Writes the ROIs to a geojson file and the overlap between those ROIs to a csv file.
Arguments:
-----------
coastline : dict
Geojson containing the portion of the coastline clipped within the bbox
roi_filename:str
File name of the json file to hold the roi data
csv_filename:str
File name of the csv file to hold the overlap between the rois
"""
lines_list = get_linestring_list(coastline)
# TEMP_FILENAME: file where each segment of coastline's rois written to
# master_overlap_df: geopandas dataframe containing all the overlap data
# for the rois
# Start_id is used as the starting number for writing the ROI ids
start_id = 0
# list to hold all the end_ids created in create_overlap()
end_id_list = []
master_overlap_df = get_empty_overlap_df(csv_filename)
finalized_roi = {'type': 'FeatureCollection', 'features': []}
for line in tqdm_notebook(lines_list, desc="Calculating Overlap"):
geojson_polygons = get_geojson_polygons(line)
end_id = write_to_geojson_file(
TEMP_FILENAME,
geojson_polygons,
perserve_id=False,
start_id=start_id)
overlap_df = create_overlap(TEMP_FILENAME, line, start_id, end_id_list)
if len(end_id_list) != 0:
# Get the most recent end_id
# print(f"end_id_list {end_id_list}")
# print(f"Updating start id from {start_id}")
start_id = end_id_list.pop()
# print(f"Updating start id to {start_id}")
# print(f"AFTER POP: end_id_list {end_id_list}")
end_id_list = []
else:
# Once all the overlapping ROIs have been created update the
# start_id for the next set of ROIs
start_id = end_id
master_overlap_df = master_overlap_df.append(
overlap_df, ignore_index=False)
# Read the geojson data for the ROIs and add it to the geojson list
rois_geojson = read_geojson_from_file(TEMP_FILENAME)
for single_roi in rois_geojson["features"]:
finalized_roi["features"].append(single_roi)
# Write to the permanent geojson and csv files
write_to_geojson_file(roi_filename, finalized_roi, perserve_id=True)
master_overlap_df.to_csv(csv_filename, mode='a', header=False, index=False)
def get_selected_roi(selected_set: tuple, roi_geojson: dict) -> dict:
"""
Returns a dictionary containing the geojson of the ROIs selected by the user
Arguments:
-----------
selected_set:tuple
A tuple containing the ids of the ROIs selected by the user
roi_geojson:dict
A geojson dict containing all the rois currently on the map
Returns:
-----------
geojson_polygons: dict
geojson dictionary containing all the ROIs selected
"""
# Check if selected_set is empty
assert len(
selected_set) != 0, "\n Please select at least one ROI from the map before continuing."
# Create a dictionary for the selected ROIs and add the user's selected
# ROIs to it
selected_ROI = {}
selected_ROI["features"] = [
feature
for feature in roi_geojson["features"]
if feature["properties"]["id"] in selected_set
]
return selected_ROI
def min_overlap_btw_vectors(
geojsonfile,
csv_filename,
overlap_percent: float = .65):
overlap_btw_vectors_df = get_overlap_dataframe(geojsonfile)
# Set of IDs where the overlap >= 65%
drop_list = list(set(
overlap_btw_vectors_df[overlap_btw_vectors_df["%_overlap"] > overlap_percent]["primary_id"]))
# load the geojson for all the rois across all parts of the coastline
geojson = read_geojson_from_file(geojsonfile)
all_ids = []
ids_in_features = []
features = []
# Remove the features overlapping more than 65% from the geojson
for feature in tqdm_notebook(
geojson["features"],
desc="Removing ROI with Excessive Overlap"):
all_ids.append(feature["properties"]["id"])
if feature["properties"]["id"] not in drop_list:
features.append(
Feature(
geometry=feature["geometry"],
properties=feature["properties"]))
ids_in_features.append(feature["properties"]["id"])
ids_not_in_features = set(all_ids) - set(ids_in_features)
# Checks if all the ROIS were removed if this was the case then we want to
# return the original data
if len(ids_in_features) == 0:
# print("ALL ROIS were removed by overlap check")
return overlap_btw_vectors_df
else:
feature_collection = FeatureCollection(features)
with open(geojsonfile, 'w') as f:
dump(feature_collection, f)
# Drop the any rows with PID in the drop_list
min_overlap_df = overlap_btw_vectors_df[~overlap_btw_vectors_df["primary_id"].isin(
drop_list)]
min_overlap_df = min_overlap_df[~min_overlap_df["id"].isin(drop_list)]
min_overlap_df.to_csv(csv_filename)
return min_overlap_df
def read_geojson_from_file(selected_roi_file: str) -> dict:
"""
Returns the geojson of the selected ROIs from the file specified by selected_roi_file
Arguments:
-----------
selected_roi_file: str
The filename of the geojson file containing all the ROI selected by the user
Returns:
-----------
data: dict
geojson of the selected ROIs
"""
with open(selected_roi_file) as f:
data = geojson.load(f)
return data
def get_overlap_dataframe(filename):
# Read the geojson into a dataframe for the related ROIs along this
# portion of the coastline
df = gpd.read_file(filename)
# Make dataframe to hold all the overlays
df_master = gpd.GeoDataFrame({"id": [],
'primary_id': [],
'geometry': [],
'intersection_area': [],
'%_overlap': []})
df_master = df_master.astype({'id': 'int32', 'primary_id': 'int32'})
# Iterate through all the polygons in the dataframe
for index in df.index:
polygon = df.iloc[index]
df_intersection = compute_overlap(polygon, df)
if not df_intersection.empty:
df_master = df_master.append(df_intersection)
return df_master
def compute_overlap(
polygon: 'pandas.core.series.Series',
df: 'geopandas.geodataframe.GeoDataFrame'):
"""
Create a geopandas.geodataframe containing the overlap between the current polygon and the other ROIs in df
The geodataframe contains data about which ROIs in df intersected with the polygon as indicated by the column "id",
the id of polygon is in the column named "primary_id", the percentage of the polygon's area that was overlapping
is in the column "%_overlap", and lastly, the amount of polygon's area being intersected is in the "intersection_area"
column.
Arguments:
-----------
polygon: 'pandas.core.series.Series'
Polygon that is being checked for overlap with the other ROI polygons in df
df:'geopandas.geodataframe.GeoDataFrame'
A geodataframe containing all the ROI polygons along a particular vector
Returns:
-----------
res_intersection: 'geopandas.geodataframe.GeoDataFrame'
A geodataframe containing the ROIs that intersected with each other as well as the %_overlap calculated.
"""
# Create a dataframe for polygon currently being checked for overlap
poly_series = gpd.GeoSeries(polygon["geometry"])
df1 = gpd.GeoDataFrame(
{'geometry': poly_series, "primary_id": polygon["id"]})
df1 = df1.set_crs(df.crs)
# Overlay the current ROI's geometry onto the rest of ROIs within the same
# region. See if any intersect
res_intersection = df.overlay(df1, how='intersection')
# Drop the rows where the ROIs are overlapping themselves
res_intersection.drop(
res_intersection[res_intersection['id'] == res_intersection['primary_id']].index, inplace=True)
res_intersection = res_intersection.set_crs(df.crs)
# Append the intersection area to the dataframe
intersection_area = res_intersection.area
intersection_area = intersection_area.rename("intersection_area")
res_intersection = res_intersection.merge(
intersection_area, left_index=True, right_index=True)
# Get the area of any given ROI
total_area = df.area[0]
# Compute % overlap
df1_percent_overlap = res_intersection["intersection_area"] / total_area
df1_percent_overlap = df1_percent_overlap.rename("%_overlap")
# Add the % overlap to the dataframe
res_intersection = res_intersection.merge(
df1_percent_overlap, left_index=True, right_index=True)
return res_intersection
def write_to_geojson_file(
filename: str,
geojson_polygons: dict,
perserve_id: bool = False,
start_id: int = 0):
"""Make a filename.geojson file from dictionary geojson_polygons
Arguments:
-----------
filename : str
The name of the geojson file to be written to. MUST end in .geojson
geojson_polygons : dict
The dictionary containing the geojson data. Must contain all geojson within the features section of geojson.
perserve_id: boolean default=False
Boolean that if True perserves the property id and writes it to the geojson file
start_id: int default=0
Starts writing the ids of the geojson at this index
Returns:
--------
end_id : int
The (last id written to geojson). Intended to be used as the starting id for next iteration.
"""
features = []
count = 0
end_id = start_id
if not perserve_id:
for geoObj in geojson_polygons["features"]:
features.append(
Feature(
properties={
"id": count +
start_id},
geometry=geoObj["geometry"]))
count = count + 1
elif perserve_id:
for geoObj in geojson_polygons["features"]:
features.append(
Feature(
properties={
"id": geoObj["properties"]["id"]},
geometry=geoObj["geometry"]))
feature_collection = FeatureCollection(features)
with open(f'{filename}', 'w') as f:
dump(feature_collection, f)
end_id += count
return end_id
def get_linestring_list(vector_in_bbox_geojson: dict) -> list:
"""
Create a list of linestrings from the multilinestrings and linestrings that compose the vector
Arguments:
-----------
vector_in_bbox_geojson: dict
geojson vector
Returns:
-----------
lines_list: list
list of multiple shapely.geometry.linestring.LineString that represent each segment of the vector
"""
lines_list = []
length_vector_bbox_features = len(vector_in_bbox_geojson['features'])
if(length_vector_bbox_features != 1):
for i in range(0, length_vector_bbox_features):
if vector_in_bbox_geojson['features'][i]['geometry']['type'] == 'MultiLineString':
for y in range(
len(vector_in_bbox_geojson['features'][i]['geometry']['coordinates'])):
line = LineString(
vector_in_bbox_geojson['features'][i]['geometry']['coordinates'][y])
lines_list.append(line)
elif vector_in_bbox_geojson['features'][i]['geometry']['type'] == 'LineString':
line = LineString(
vector_in_bbox_geojson['features'][i]['geometry']['coordinates'])
lines_list.append(line)
else:
for i in range(0, len(vector_in_bbox_geojson['features'])):
if vector_in_bbox_geojson['features'][0]['geometry']['type'] == 'MultiLineString':
for y in range(
len(vector_in_bbox_geojson['features'][0]['geometry']['coordinates'])):
line = LineString(
vector_in_bbox_geojson['features'][0]['geometry']['coordinates'][y])
lines_list.append(line)
elif vector_in_bbox_geojson['features'][i]['geometry']['type'] == 'LineString':
line = LineString(
vector_in_bbox_geojson['features'][i]['geometry']['coordinates'])
lines_list.append(line)
return lines_list
def get_geojson_polygons(linestring):
""" Returns the ROI rectangles in geojson"""
multipoint_list = interpolate_points(linestring)
tuples_list = convert_multipoints_to_tuples(multipoint_list)
geojson_polygons = create_reactangles(tuples_list)
return geojson_polygons
def interpolate_points(
line: "shapely.geometry.linestring.LineString",
num_pts=5) -> list:
"""
Create a list of multipoints for the interpolated points along each linestring
Arguments:
-----------
line: "shapely.geometry.linestring.LineString"
shapely.geometry.linestring.LineString that represents each segment of the coastline vector
num_pts: int
integer value representing the number of interpolated points created along the LineString
Returns:
-----------
multipoint_list: list
A list of multiple shapely.geometry.multipoint.MultiPoint
"""
# multipoint_list holds the multipoint for each feature of the coastline
# within the bbox
multipoint_list = []
distance_delta = line.length / num_pts
distances = np.arange(0, line.length, distance_delta)
if line.is_closed:
# It's a closed shape, so its boundary is empty; use the first coordinate instead
boundary = shapely.geometry.Point(line.coords[0])
else:
boundary = line.boundary.geoms[0]
points = [line.interpolate(distance)
for distance in distances] + [boundary]
multipoint = unary_union(points)
multipoint_list.append(multipoint)
return multipoint_list
def convert_multipoints_to_tuples(multipoint_list: list) -> list:
"""
Create a list of tuples for the points in multipoint_list
Arguments:
-----------
multipoint_list: list
A list of multiple shapely.geometry.multipoint.MultiPoint
Returns:
-----------
tuples_list: list
A list of tuples each tuple represents a single point
"""
tuples_list = []
for multipoint in multipoint_list:
# Create an empty array to hold all the points as tuples
points_list = []
if isinstance(multipoint, shapely.geometry.Point):
point = multipoint
point_tuple = (point.coords[0][1], point.coords[0][0])
points_list.append(point_tuple)
else:
# First get each point from the multipoint object
points_array = [point for point in multipoint.geoms]
# For each point swap lat and lng because ipyleaflet swaps them
for point in points_array:
point_tuple = (point.coords[0][1], point.coords[0][0])
points_list.append(point_tuple)
tuples_list.append(points_list)
return tuples_list
def convert_corners_to_geojson(
upper_right_y: float,
upper_right_x: float,
upper_left_y: float,
upper_left_x: float,
lower_left_y: float,
lower_left_x: float,
lower_right_y: float,
lower_right_x: float) -> dict:
"""Convert the 4 corners of the rectangle into geojson """
geojson_feature = {}
geojson_feature["type"] = "Feature"
geojson_feature["properties"] = {}
geojson_feature["geometry"] = {}
geojson_polygon = {}
geojson_polygon["type"] = "Polygon"
geojson_polygon["coordinates"] = []
# The coordinates(which are 1,2 arrays) are nested within a parent array
nested_array = []
nested_array.append([upper_right_x, upper_right_y])
nested_array.append([upper_left_x, upper_left_y])
nested_array.append([lower_left_x, lower_left_y])
nested_array.append([lower_right_x, lower_right_y])
# GeoJson rectangles have the first point repeated again as the last point
nested_array.append([upper_right_x, upper_right_y])
geojson_polygon["coordinates"].append(nested_array)
geojson_feature["geometry"] = geojson_polygon
return geojson_feature
def create_reactangles(tuples_list: list, size: int = 0.04) -> dict:
"""
Create the geojson rectangles for each point in the tuples_list
Arguments:
-----------
tuples_list: list
list of tuples containing all the interpolated points along the given vector
size: float
A float that will be used as the multiplier for the ROI sizes
Returns:
-----------
geojson_polygons: dict
geojson dictionary contains all the rectangles generated
"""
geojson_polygons = {"type": "FeatureCollection", "features": []}
# Create a rectangle at each point on the line
# Swap the x and y for each point because ipyleaflet swaps them for draw
# methods
for points_list in tuples_list:
for point in points_list:
upper_right_x = point[0] - (size / 2)
upper_right_y = point[1] - (size / 2)
upper_left_x = point[0] + (size / 2)
upper_left_y = point[1] - (size / 2)
lower_left_x = point[0] + (size / 2)
lower_left_y = point[1] + (size / 2)
lower_right_x = point[0] - (size / 2)
lower_right_y = point[1] + (size / 2)
# Convert each set of points to geojson (DONT swap x and y this
# time)
geojson_polygon = convert_corners_to_geojson(
upper_right_x,
upper_right_y,
upper_left_x,
upper_left_y,
lower_left_x,
lower_left_y,
lower_right_x,
lower_right_y)
geojson_polygons["features"].append(geojson_polygon)
return geojson_polygons
# ROI OVERLAP RELATED FUNCTIONS
# -------------------------------------
def create_overlap(
filename: str,
line: "shapely.geometry.linestring.LineString",
start_id: int,
end_id_list: list):
"""
Check whether every ROI overlaps with both of its neighbors. If it doesn't, increase the number of ROIs, up to 26. If the number of ROIs exceeds 26,
then no more will be drawn even if there is not enough overlap.
Arguments:
-----------
filename: str
The name of the file containing all the geojson data for all the ROIs generated.
line: shapely.geometry.linestring.LineString
represents each segment of the vector
start_id: int
the id number for the first geojson in the line
Returns:
-----------
Updated df_overlap.
"""
# Initial number of points interpolated along the line
num_pts = 5
# Boolean indicates whether every single ROI generated overlaps at least
# one other ROI
do_all_ROI_overlap = False
# Boolean indicates whether mean overlap was over 80% and if the number
# points is greater than 2
is_overlap_excessive = True
df_overlap = get_overlap_dataframe(filename)
# If df_overlap is empty it means not a single ROI overlapped another
if not df_overlap.empty:
df_all_ROIs = gpd.read_file(filename)
do_all_ROI_overlap = check_all_ROI_overlap(df_all_ROIs, df_overlap)
if do_all_ROI_overlap:
if check_average_ROI_overlap(df_overlap, .35):
# If the average overlap is over 35% decrease number of rois by
# 1
num_pts = adjust_num_pts(num_pts - 1)
is_overlap_excessive = True
# print(f"num_pts decreased to: {num_pts}")
if not do_all_ROI_overlap:
# If not all the rois overlap increase number of rois by 1
num_pts = adjust_num_pts(num_pts + 1)
# print(f"num_pts increased to: {num_pts}")
# Keep looping while not all the rois overlap and the average overlap is
# more than 80%
while do_all_ROI_overlap == False and is_overlap_excessive:
multipoint_list = interpolate_points(line, num_pts)
tuples_list = convert_multipoints_to_tuples(multipoint_list)
geojson_polygons = create_reactangles(tuples_list)
end_id = write_to_geojson_file(
filename,
geojson_polygons,
perserve_id=False,
start_id=start_id)
end_id_list.append(end_id)
# print(f"\nend_id_list {end_id_list}\n")
df_overlap = get_overlap_dataframe(filename)
# If df_overlap is empty means not a single ROI overlapped each other
if not df_overlap.empty:
df_all_ROIs = gpd.read_file(filename)
do_all_ROI_overlap = check_all_ROI_overlap(df_all_ROIs, df_overlap)
else:
do_all_ROI_overlap = False
if not do_all_ROI_overlap:
if num_pts == 1 or num_pts > 25:
# print(f"IN LOOP: num_pts is 1. BREAKING")
break # if the num_pts == 1 means no more ROIs should be removed
num_pts = adjust_num_pts(num_pts + 1)
else: # all ROIs overlap
if num_pts == 1 or num_pts > 25:
break # if the num_pts == 1 means no more ROIs should be removed
is_overlap_excessive = check_average_ROI_overlap(df_overlap, .35)
if is_overlap_excessive:
# If the average overlap is over 35% decrease number of rois by
# 1
num_pts = adjust_num_pts(num_pts - 1)
is_overlap_excessive = True
# print(f"IN LOOP: num_pts decreased to: {num_pts}")
return df_overlap
def check_average_ROI_overlap(df_overlap, percentage):
"""" Returns True if the mean overlap in the df is greater than percentage"""
mean_overlap = df_overlap["%_overlap"].mean()
if mean_overlap > percentage:
return True
return False
def adjust_num_pts(new_num_pts):
"""Rounds the number of points to a whole number 1<=x<=100"""
new_num_pts = int(round(new_num_pts, 0))
if new_num_pts < 1:
new_num_pts = 1
elif new_num_pts > 100:
new_num_pts = 100
return new_num_pts
def check_all_ROI_overlap(df_all_ROIs, df_overlap):
"""Compares the IDs of the ROIs in df_overlap(contains only the ids of the overlapping ROIs), to df_all_rois(contains the ids of all ROIs)
Returns
True: If all the IDs in df_all_ROIs are also in df_overlap
False: If NOT all the IDs in df_all_ROIs are also in df_overlap"""
all_ids_list = list(df_all_ROIs["id"])
# print(f"\n all_ids_list:{all_ids_list}\n")
overlapping_ids = df_overlap["primary_id"]
# print(f"\n overlapping_ids:\n{overlapping_ids}\n")
missing_list = list(set(all_ids_list) - set(overlapping_ids))
if missing_list == []:
return True
return False
``` |
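A compact, self-contained sketch of the interpolation-plus-rectangle idea behind `interpolate_points` and `create_reactangles` above; the coordinates and the 0.04-degree box size are illustrative values, not project data.
```python
# Interpolate evenly spaced points along a toy LineString, then build a small
# square (side = 0.04 degrees) centred on each point, as the ROI generator does.
from shapely.geometry import LineString, box

line = LineString([(0.0, 0.0), (0.2, 0.0)])  # toy coastline segment
num_pts = 5
distances = [line.length * i / num_pts for i in range(num_pts + 1)]
points = [line.interpolate(d) for d in distances]

size = 0.04
rois = [box(p.x - size / 2, p.y - size / 2, p.x + size / 2, p.y + size / 2)
        for p in points]
print(len(rois), rois[0].bounds)  # 6 (-0.02, -0.02, 0.02, 0.02)
```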
{
"source": "2320sharon/segmentation_zoo-1",
"score": 2
} |
#### File: segmentation_zoo-1/utilities/download_sample_data.py
```python
import sys,os, time
from tkinter import filedialog
from tkinter import *
from tkinter import messagebox
import requests, zipfile, io
from glob import glob
def download_url(url, save_path, chunk_size=128):
r = requests.get(url, stream=True)
with open(save_path, 'wb') as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
#### choose zenodo release
root = Tk()
choices = ['landsat_6229071', 'landsat_6230083', 'coin_6229579', 'aerial_6234122', 'aerial_6235090']
variable = StringVar(root)
variable.set('landsat_6229071')
w = OptionMenu(root, variable, *choices)
w.pack(); root.mainloop()
dataset_id = variable.get()
print("Dataset ID : {}".format(dataset_id))
zenodo_id = dataset_id.split('_')[-1]
####======================================
os.makedirs('../sample_data', exist_ok=True)
os.makedirs('../sample_data/'+dataset_id, exist_ok=True)
model_direc = '../sample_data/'+dataset_id
root_url = 'https://zenodo.org/record/'+zenodo_id+'/files/'
filename='data_sample.zip'
weights_direc = model_direc + os.sep + 'data_sample'
url=(root_url+filename)
outfile = model_direc + os.sep + filename
if not os.path.exists(outfile):
print('Retrieving data {} ...'.format(url))
download_url(url, outfile)
print('Unzipping data to {} ...'.format(model_direc))
with zipfile.ZipFile(outfile, 'r') as zip_ref:
zip_ref.extractall(model_direc)
``` |
{
"source": "23233/sproxy",
"score": 3
} |
#### File: Fetcher/fetchers/89ip.py
```python
import re
from Util.WebRequest import WebRequest
from Util.utilFunction import getHtmlTree
class CustomFetcher():
fetcher_host = "www.89ip.cn"
def run(self):
url = 'http://www.89ip.cn/tqdl.html?num=2500&address=&kill_address=&port=&kill_port=&isp='
html_tree = getHtmlTree(url)
data_warp = html_tree.xpath("//div[@class='fly-panel']/div[@style='padding-left:20px;']//text()")
for data in data_warp:
if ':' in data:
yield data.strip()
```
#### File: Fetcher/fetchers/ip3366.py
```python
import re
from Util.WebRequest import WebRequest
from Util.utilFunction import getHtmlTree
class CustomFetcher():
fetcher_host = "www.ip3366.net"
def run(self):
urls = ['http://www.ip3366.net/free/']
request = WebRequest()
for url in urls:
r = request.get(url)
proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text)
for proxy in proxies:
yield ":".join(proxy)
```
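The ip3366 fetcher relies on a simple regex over table rows; a standalone sketch of that extraction with a made-up HTML snippet (not real site output):
```python
# Extract ip:port pairs from table markup the way the fetcher above does.
import re

html = "<td>1.2.3.4</td> <td>8080</td> <td>5.6.7.8</td> <td>3128</td>"
proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', html)
print([":".join(p) for p in proxies])  # ['1.2.3.4:8080', '5.6.7.8:3128']
```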
#### File: Src/Log/LogHandler.py
```python
import os
import logging
from logging.handlers import TimedRotatingFileHandler
from Config import ConfigManager
LOG_LEVEL = {
"CRITICAL": 50,
"FATAL": 50,
"ERROR": 40,
"WARNING": 30,
"WARN": 30,
"INFO": 20,
"DEBUG": 10,
"NOTSET": 0,
}
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(CURRENT_PATH, os.pardir, os.pardir)
LOG_PATH = os.path.join(ROOT_PATH, 'logs')
if not os.path.exists(LOG_PATH):
os.mkdir(LOG_PATH)
class LogHandler(logging.Logger):
def __init__(self, level=None, stream=True, file=True):
self.name = "ProxyPool"
if level:
self.level = level
else:
self.level = LOG_LEVEL.get(ConfigManager.base_config.setting.get("log_level"), LOG_LEVEL["INFO"])
super(LogHandler, self).__init__(self.name, level=self.level)
if stream:
self.__setStreamHandler__()
if file:
self.__setFileHandler__()
def __setFileHandler__(self, level=None):
file_name = os.path.join(LOG_PATH, '{name}.log'.format(name=self.name))
# Log rotation: one file per day under the log directory, keep the last 15 days
file_handler = TimedRotatingFileHandler(filename=file_name, when='D', interval=1, backupCount=15)
file_handler.suffix = '%Y%m%d.log'
if not level:
file_handler.setLevel(self.level)
else:
file_handler.setLevel(level)
formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
self.file_handler = file_handler
self.addHandler(file_handler)
def __setStreamHandler__(self, level=None):
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
stream_handler.setFormatter(formatter)
if not level:
stream_handler.setLevel(self.level)
else:
stream_handler.setLevel(level)
self.addHandler(stream_handler)
def resetName(self, name):
self.name = name
self.removeHandler(self.file_handler)
self.__setFileHandler__()
if __name__ == '__main__':
log = LogHandler()
log.info('this is a test msg')
```
#### File: Src/Manager/ProxyClean.py
```python
import sys
sys.path.append("Src")
import time
import threading
from Manager.ProxyManager import proxy_manager
from Log.LogManager import log
from Config import ConfigManager
try:
from Queue import Queue # py2
except:
from queue import Queue # py3
# This threading-based implementation is problematic and cannot later be scaled out to separate processes.
# must call classmethod initQueue before the thread starts
class ProxyClean(threading.Thread):
def __init__(self, **kwargs):
super(ProxyClean, self).__init__(**kwargs)
class ProxyCleanUseful(ProxyClean):
def run(self):
hold_number = ConfigManager.setting_config.setting.get("hold_useful_proxy_number")
total_number = proxy_manager.getUsefulProxyNumber()
clean_number = proxy_manager.cleanUsefulProxy(hold_number=hold_number)
log.info("clean useful, total_number:{total_number}, clean_number:{clean_number}, hold_number:{hold_number}".format(total_number=total_number, clean_number=clean_number, hold_number=hold_number))
class ProxyCleanRaw(ProxyClean):
def run(self):
total_number = proxy_manager.getRawProxyNumber()
clean_number = proxy_manager.cleanRawProxy()
remain_number = total_number - clean_number
log.info("clean raw_proxy, total_number:{total_number}, clean_number:{clean_number}, remain_number:{remain_number}".format(total_number=total_number, clean_number=clean_number, remain_number=remain_number))
if __name__ == "__main__":
t1 = ProxyCleanUseful()
t1.daemon = True
t1.start()
t2 = ProxyCleanRaw()
t2.daemon = True
t2.start()
t1.join()
t2.join()
```
#### File: Src/Manager/ProxyFetch.py
```python
from gevent import monkey, pool
monkey.patch_all()
import sys
sys.path.append("Src")
import time
import threading
import gevent
from Manager import ProxyManager
# from ProxyGetter.getFreeProxy import GetFreeProxy
from Fetcher import FetcherManager
from Log.LogManager import log
from Config import ConfigManager
from Util.utilFunction import verifyProxyFormat
try:
from Queue import Queue # py2
except:
from queue import Queue # py3
# This threading-based implementation is problematic and cannot later be scaled out to separate machines.
# must call classmethod initQueue beforehand
class ProxyFetch(object):
queue = Queue()
@classmethod
def initQueue(cls):
fetchers = ProxyManager.proxy_manager.getExecFetcher()
for fetcher in fetchers:
cls.queue.put(fetcher)
def start(self):
self.start_time = int(time.time())
concurrency = ConfigManager.setting_config.setting.get("fetch_new_proxy_concurrency")
task_pool = pool.Pool(concurrency)
queue_size = self.queue.qsize()
if queue_size > 0:
greenlet_list = []
for _ in range(queue_size):
greenlet_list.append(task_pool.spawn(self.fetch))
gevent.joinall(greenlet_list)
else:
log.info("Not Have Fetcher Of Now, skip!")
def fetch(self):
start_time = time.time()
total = 0
succ = 0
fail = 0
skip = 0
fetcher = self.queue.get()
name = fetcher["name"]
fetcher_class = FetcherManager.getFetcherClass(name)
log.debug("fetch [{name}] proxy start".format(name=name))
try:
f = fetcher_class()
for proxy in f.run():
proxy = proxy.strip()
if proxy and verifyProxyFormat(proxy) and \
not ProxyManager.proxy_manager.checkUsefulProxyExists(proxy):
ProxyManager.proxy_manager.saveUsefulProxy(proxy)
succ = succ + 1
log.debug("fetch [{name}] proxy {proxy} succ".format(name=name, proxy=proxy))
else:
skip = skip + 1
log.debug("fetch [{name}] proxy {proxy} skip".format(name=name, proxy=proxy))
total = total + 1
except Exception as e:
log.error("fetch [{name}] proxy fail: {error}".format(name=name, error=e))
fail = fail + 1
self.queue.task_done()
now = int(time.time())
elapsed_time = int(now - start_time)
next_fetch_time = self.start_time + (fetcher["interval"] * 60)
data = {
"$inc": {
"succ": succ,
"fail": fail,
"skip": skip,
"total": total,
},
"$set": {
"next_fetch_time": next_fetch_time,
}
}
ProxyManager.proxy_manager.updateFetcher(name, data)
log.info("fetch [{name:^15}] proxy finish, \
total:{total}, succ:{succ}, fail:{fail}, skip:{skip}, elapsed_time:{elapsed_time}s". \
format(name=name, total=total, succ=succ, fail=fail, skip=skip, elapsed_time=elapsed_time))
def run(self):
while self.queue.qsize():
self.fetch()
if __name__ == "__main__":
ProxyFetch.initQueue()
t = ProxyFetch()
t.start()
```
#### File: Src/Util/WebRequest.py
```python
from requests.models import Response
import requests
import random
import time
class WebRequest(object):
def __init__(self, *args, **kwargs):
pass
@property
def user_agent(self):
"""
Return a User-Agent chosen at random.
:return:
"""
ua_list = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
]
return random.choice(ua_list)
@property
def header(self):
"""
basic header
:return:
"""
return {'User-Agent': self.user_agent,
'Accept': '*/*',
'Connection': 'keep-alive',
'Accept-Language': 'zh-CN,zh;q=0.8'}
def get(self, url, header=None, retry_time=1, timeout=10,
*args, **kwargs):
headers = self.header
if header and isinstance(header, dict):
headers.update(header)
try:
resp = requests.get(url, headers=headers, timeout=timeout, **kwargs)
except Exception as e:
# print("request url error", url, e)
resp = Response()
resp.status_code = 504
return resp
``` |
{
"source": "232525/UCAS-srun-login-script",
"score": 2
} |
#### File: UCAS-srun-login-script/UCASSrunLogin/LoginManager.py
```python
import requests
import time
import re
from ._decorators import *
from .encryption.srun_md5 import *
from .encryption.srun_sha1 import *
from .encryption.srun_base64 import *
from .encryption.srun_xencode import *
header={
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36'
}
class LoginManager:
def __init__(self,
login_ip = "http://124.16.81.61/",
n = "200",
vtype = "1",
acid = "1",
enc = "srun_bx1"
):
url_login_page = login_ip + "srun_portal_pc?ac_id=1&theme=pro"
url_get_challenge_api = login_ip + "cgi-bin/get_challenge"
url_login_api = login_ip + "cgi-bin/srun_portal"
# urls
self.url_login_page = url_login_page
self.url_get_challenge_api = url_get_challenge_api
self.url_login_api = url_login_api
# other static parameters
self.n = n
self.vtype = vtype
self.ac_id = acid
self.enc = enc
def login(self, username, password):
self.username = username
self.password = password
self.get_ip()
self.get_token()
self.get_login_responce()
def get_ip(self):
print("Step1: Get local ip returned from srun server.")
self._get_login_page()
self._resolve_ip_from_login_page()
print("----------------")
def get_token(self):
print("Step2: Get token by resolving challenge result.")
self._get_challenge()
self._resolve_token_from_challenge_response()
print("----------------")
def get_login_responce(self):
print("Step3: Loggin and resolve response.")
self._generate_encrypted_login_info()
self._send_login_info()
self._resolve_login_responce()
print("The loggin result is: " + self._login_result)
print("----------------")
def _is_defined(self, varname):
"""
Check whether variable is defined in the object
"""
allvars = vars(self)
return varname in allvars
@infomanage(
callinfo = "Getting login page",
successinfo = "Successfully get login page",
errorinfo = "Failed to get login page, maybe the login page url is not correct"
)
def _get_login_page(self):
# Step1: Get login page
self._page_response = requests.get(self.url_login_page, headers=header)
@checkvars(
varlist = "_page_response",
errorinfo = "Lack of login page html. Need to run '_get_login_page' in advance to get it"
)
@infomanage(
callinfo = "Resolving IP from login page html",
successinfo = "Successfully resolve IP",
errorinfo = "Failed to resolve IP"
)
def _resolve_ip_from_login_page(self):
# self.ip = re.search('id="user_ip" value="(.*?)"', self._page_response.text).group(1)
self.ip = re.search('ip(.*):(.*)"(.*?)"', self._page_response.text).group(1)
@checkip
@infomanage(
callinfo = "Begin getting challenge",
successinfo = "Challenge response successfully received",
errorinfo = "Failed to get challenge response, maybe the url_get_challenge_api is not correct." \
"Else check params_get_challenge"
)
def _get_challenge(self):
"""
The 'get_challenge' request aims to ask the server to generate a token
"""
params_get_challenge = {
"callback": "jsonp1583251661367", # This value can be any string, but cannot be absent
"username": self.username,
"ip": self.ip
}
self._challenge_response = requests.get(self.url_get_challenge_api, params=params_get_challenge, headers=header)
@checkvars(
varlist = "_challenge_response",
errorinfo = "Lack of challenge response. Need to run '_get_challenge' in advance"
)
@infomanage(
callinfo = "Resolving token from challenge response",
successinfo = "Successfully resolve token",
errorinfo = "Failed to resolve token"
)
def _resolve_token_from_challenge_response(self):
self.token = re.search('"challenge":"(.*?)"', self._challenge_response.text).group(1)
@checkip
def _generate_info(self):
info_params = {
"username": self.username,
"password": <PASSWORD>,
"ip": self.ip,
"acid": self.ac_id,
"enc_ver": self.enc
}
info = re.sub("'",'"',str(info_params))
self.info = re.sub(" ",'',info)
@checkinfo
@checktoken
def _encrypt_info(self):
self.encrypted_info = "{SRBX1}" + get_base64(get_xencode(self.info, self.token))
@checktoken
def _generate_md5(self):
self.md5 = get_md5("", self.token)
@checkmd5
def _encrypt_md5(self):
self.encrypted_md5 = "{MD5}" + self.md5
@checktoken
@checkip
@checkencryptedinfo
def _generate_chksum(self):
self.chkstr = self.token + self.username
self.chkstr += self.token + self.md5
self.chkstr += self.token + self.ac_id
self.chkstr += self.token + self.ip
self.chkstr += self.token + self.n
self.chkstr += self.token + self.vtype
self.chkstr += self.token + self.encrypted_info
@checkchkstr
def _encrypt_chksum(self):
self.encrypted_chkstr = get_sha1(self.chkstr)
def _generate_encrypted_login_info(self):
self._generate_info()
self._encrypt_info()
self._generate_md5()
self._encrypt_md5()
self._generate_chksum()
self._encrypt_chksum()
@checkip
@checkencryptedmd5
@checkencryptedinfo
@checkencryptedchkstr
@infomanage(
callinfo = "Begin to send login info",
successinfo = "Login info send successfully",
errorinfo = "Failed to send login info"
)
def _send_login_info(self):
login_info_params = {
'callback': 'jsonp1583251661368', # This value can be any string, but cannot be absent
'action':'login',
'username': self.username,
            'password': self.encrypted_md5,
'ac_id': self.ac_id,
'ip': self.ip,
'info': self.encrypted_info,
'chksum': self.encrypted_chkstr,
'n': self.n,
'type': self.vtype
}
self._login_responce = requests.get(self.url_login_api, params=login_info_params, headers=header)
@checkvars(
varlist = "_login_responce",
errorinfo = "Need _login_responce. Run _send_login_info in advance"
)
@infomanage(
callinfo = "Resolving login result",
successinfo = "Login result successfully resolved",
errorinfo = "Cannot resolve login result. Maybe the srun response format is changed"
)
def _resolve_login_responce(self):
self._login_result = re.search('"suc_msg":"(.*?)"', self._login_responce.text).group(1)
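# A minimal usage sketch of the three-step flow implemented above (get_ip -> get_token ->
# get_login_responce). The credential values are placeholders, and the import path assumes
# the UCASSrunLogin package layout shown in the file header:
#
#   from UCASSrunLogin.LoginManager import LoginManager
#   manager = LoginManager()
#   manager.login(username="your_username", password="your_password")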
``` |
{
"source": "2326wz/sharp-in",
"score": 2
} |
#### File: v0/routes/ping.py
```python
from fastapi import APIRouter, HTTPException, Request, Response
from core.config import get_config
from core.api_models.common import Pong
router = APIRouter()
@router.get("/ping", response_model=Pong)
async def root() -> Pong:
project_name = get_config().project_name
print(project_name)
version = get_config().version
return Pong(pong=f"{project_name}, version {version}")
```
#### File: sharp-in/telegram-bot/server.py
```python
from fastapi import FastAPI
import telebot
from api.v0.routes.api import router as api_router
from core.config import get_config
def get_application() -> FastAPI:
project_name = get_config().project_name
debug = get_config().debug
version = get_config().version
prefix = get_config().api_prefix
application = FastAPI(title=project_name, debug=debug, version=version)
application.include_router(api_router, prefix=prefix)
return application
# TG bot setup
WEBHOOK_HOST = get_config().WEBHOOK_HOST
WEBHOOK_PORT = 443
WEBHOOK_URL_BASE = "https://%s:%s" % (WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/%s" % ("api/v0/tg")
bot = telebot.TeleBot(get_config().token)
bot.remove_webhook()
bot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH)
app = get_application()
``` |
{
"source": "2339145977/discordpy-startup",
"score": 3
} |
#### File: 2339145977/discordpy-startup/discordbot.py
```python
from os import getenv
import traceback
import random
import discord
PREFIX = "yz "
class MyClient(discord.Client):
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
async def on_message(self, message):
# we do not want the bot to reply to itself
if message.author.id == self.user.id:
return
if message.content.startswith(PREFIX + '你好'):
await message.reply('你好~', mention_author=True)
if message.content.startswith('难') and len(message.content) < 3:
await message.channel.send('晴天好心情猜中了!')
if message.content.startswith(PREFIX + '涩图'):
await message.channel.send('https://nekos.life/lewd')
if message.content.startswith(PREFIX + "晚安"):
await message.reply('晚安~', mention_author=True)
token = getenv('DISCORD_BOT_TOKEN')
client = MyClient()
client.run(token)
``` |
{
"source": "233a344a455/nbutils",
"score": 2
} |
#### File: nbutils/nonutils/persistent.py
```python
import json
from pathlib import Path
from typing import Dict, Any
from pydantic import BaseSettings
pref_dir: Path = Path("preferences")
def json_config_settings_source(pref: 'Preference') -> Dict[str, Any]:
if pref._perf_path.exists():
return json.loads(pref._perf_path.read_text(encoding='utf-8'))
else:
return {}
class Preference(BaseSettings):
_initialized = False
_perf_path: Path
def __dump(self):
self._perf_path.write_text(self.json(), encoding='utf-8')
def __init__(self, pref_name: str, *args, **kwargs):
self._perf_path = pref_dir / f"{pref_name}.json"
pref_dir.mkdir(parents=True, exist_ok=True)
super().__init__(*args, **kwargs)
self._initialized = True
def __setattr__(self, name, value):
        # Dump the preference whenever an attribute is changed
super().__setattr__(name, value)
if getattr(self, '_initialized', False):
self.__dump()
class Config:
underscore_attrs_are_private = True
@classmethod
def customise_sources(
cls,
init_settings,
env_settings,
file_secret_settings,
):
return (
init_settings,
json_config_settings_source,
env_settings,
file_secret_settings,
)
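# A minimal usage sketch (the subclass and field below are illustrative and not part of the
# original module): every attribute assignment after __init__ is re-serialized to
# preferences/<pref_name>.json by __dump.
if __name__ == "__main__":
    class DemoPreference(Preference):
        greeting: str = "hello"
    pref = DemoPreference("demo")
    pref.greeting = "hi"  # immediately rewritten to preferences/demo.json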
``` |
{
"source": "233a344a455/RobomasterEPlib",
"score": 3
} |
#### File: 233a344a455/RobomasterEPlib/interact.py
```python
import rmepy
rm = rmepy.Robot()
rm.start()
print(rm.send_msg('SDK version: ')[1])
def send_raw():
print("Enter send raw command mode.")
while True:
i = input(' > ')
if i == 'e':
return
print(rm.send_msg(i))
print("Exit send raw command mode.")
```
#### File: RobomasterEPlib/rmepy/logger.py
```python
import traceback
class Logger():
def __init__(self, name):
if name.__class__.__name__ == 'str':
self.name = name
else:
self.name = name.__class__.__name__
self.level = 'DEBUG'
def info(self, msg):
"""Print infos.
Args:
            msg: (str) the info message to print
Returns:
None
"""
if self._log_level <= 1:
print("\033[0;36m" + "[Info]%s: %s" % (self.name, msg) + "\033[0m")
def warn(self, msg):
"""Print warnings.
Args:
            msg: (str) the warning message to print
Returns:
None
"""
if self._log_level <= 2:
print("\033[33m" + "[Warning]%s: %s" % (self.name, msg) + "\033[0m")
def error(self, msg):
"""Print errors.
        Print the error, then offer a traceback or a forced continue.
        Args:
            msg: (str) the error message to print
Returns:
None
"""
print("=============================================")
print("\033[0;31m" + "[Error]%s: %s" % (self.name, msg) + "\033[0m")
temp = input("Force to continue? ('y' to continue / 'n' to print Traceback) ")
print("=============================================")
if temp.upper() != 'Y':
print("\n\033[0;31m" + "Traceback (most recent call last):" + "\033[0m")
for line in traceback.format_stack()[:-1]:
print("\033[31m" + line + "\033[0m")
print("\n=============================================")
exit()
def debuginfo(self, msg):
"""Print debug msg.
Args:
            msg: (str) the debug message to print
Returns:
None
"""
if self._log_level == 0:
print("\033[2m" + "[Debug]%s: %s" % (self.name, msg) + "\033[0m")
def debug(self, msg):
"""Print highlighted debug msg.
Args:
            msg: (str) the debug message to print
Returns:
None
"""
print("\033[7m" + "[Debug]%s: %s" % (self.name, msg) + "\033[0m")
@property
def level(self):
return ('DEBUG', 'INFO', 'WARNING', 'ERROR')[self._log_level]
@level.setter
def level(self, level):
try:
level = ('DEBUG', 'INFO', 'WARNING', 'ERROR').index(level)
except ValueError:
print("\033[33m" + "[Warning]Logger: Invalid log level %r." %level + "\033[0m")
else:
self._log_level = level
if __name__ == '__main__':
log = Logger('test')
log.level = 'WARNING'
log.info('test')
log.warn('test')
log.debuginfo('test')
log.debug('test')
print('aaaa')
log.error('test')
```
#### File: rmepy/robot_modules/basic_ctrl.py
```python
from .__module_template import RobotModuleTemplate
from ..decorators import accepts, retry
class BasicCtrl(RobotModuleTemplate):
def __init__(self, robot):
super().__init__(robot)
@retry(n_retries=1e5)
def enter_sdk_mode(self):
"""控制机器人进入 SDK 模式
当机器人成功进入 SDK 模式后,才可以响应其余控制命令
Args:
None
Returns:
None
"""
resp = self._send_msg('command')[1]
if resp == 'ok':
self._log.info('Enter sdk mode successfully.')
return True
else:
self._log.warn('Failed to enter sdk mode: %s' %resp)
return False
def quit_cmd_mode(self):
"""退出 SDK 模式
控制机器人退出 SDK 模式,重置所有设置项
Wi-Fi/USB 连接模式下,当连接断开时,机器人会自动退出 SDK 模式
Args:
None
Returns:
None
"""
return self._send_cmd('quit')
@accepts((int, 0, 2))
def set_robot_mode(self, mode):
"""设置机器人的运动模式
机器人运动模式描述了云台与底盘之前相互作用与相互运动的关系,
每种机器人模式都对应了特定的作用关系。
Args:
            mode (enum): robot motion mode
            {0: gimbal follows chassis, 1: chassis follows gimbal, 2: free mode}
Returns:
None
"""
mode_enum = ('chassis_lead', 'gimbal_lead', 'free')
return self._send_cmd('robot mode ' + mode_enum[mode])
def get_robot_mode(self):
"""获取机器人运动模式
查询当前机器人运动模式
机器人运动模式描述了云台与底盘之前相互作用与相互运动的关系,
每种机器人模式都对应了特定的作用关系。
Args:
None
Returns:
            (int): robot motion mode
            {0: gimbal follows chassis, 1: chassis follows gimbal, 2: free mode}
"""
mode_enum = ('chassis_lead', 'gimbal_lead', 'free')
return mode_enum.index(self._send_cmd('robot mode ?'))
def video_stream_on(self):
"""开启视频流推送
打开视频流
打开后,可从视频流端口接收到 H.264 编码的码流数据
Args:
None
Returns:
None
"""
return self._send_cmd('stream on')
def video_stream_off(self):
"""关闭视频流推送
关闭视频流
关闭视频流后,H.264 编码的码流数据将会停止输出
Args:
None
Returns:
None
"""
return self._send_cmd('stream off')
```
#### File: rmepy/robot_modules/chassis.py
```python
from .__module_template import RobotModuleTemplate
from ..decorators import accepts
class Chassis(RobotModuleTemplate):
def __init__(self, robot):
super().__init__(robot)
        # position: current chassis position relative to the power-on position
        self.x = 0.0  # x-axis position (m)
        self.y = 0.0  # y-axis position (m)
# attitude
self.pitch = 0.0
self.roll = 0.0
self.yaw = 0.0
# status
        self.is_static = False  # whether the chassis is static
        self.is_uphill = False  # whether it is going uphill
        self.is_downhill = False  # whether it is going downhill
        self.is_on_slope = False  # whether it is sliding down a slope
        self.is_pick_up = False  # whether it has been picked up
        self.is_slip = False  # whether it is slipping
        self.is_impact_x = False  # whether an impact is sensed on the x axis
        self.is_impact_y = False  # whether an impact is sensed on the y axis
        self.is_impact_z = False  # whether an impact is sensed on the z axis
        self.is_roll_over = False  # whether it has rolled over
        self.is_hill_static = False  # whether it is static on a slope
@accepts(speed_x=(float, -3.5, 3.5), speed_y=(float, -3.5, 3.5), speed_yaw=(float, -600, 600))
def set_speed(self, speed_x, speed_y, speed_yaw):
"""底盘运动速度控制
控制底盘运动速度
Args:
            speed_x (float:[-3.5,3.5]): speed along the x axis, in m/s
            speed_y (float:[-3.5,3.5]): speed along the y axis, in m/s
            speed_yaw (float:[-600,600]): rotation speed around the z axis, in °/s
Returns:
None
"""
return self._send_cmd('chassis speed x %f y %f z %f' % (speed_x, speed_y, speed_yaw))
@accepts((int, -1000, 1000), (int, -1000, 1000), (int, -1000, 1000), (int, -1000, 1000))
def set_wheel_speed(self, speed_w1, speed_w2, speed_w3, speed_w4):
"""底盘轮子速度控制
控制四个轮子的速度
Args:
            speed_w1 (int:[-1000, 1000]): front-right mecanum wheel speed, in rpm
            speed_w2 (int:[-1000, 1000]): front-left mecanum wheel speed, in rpm
            speed_w3 (int:[-1000, 1000]): rear-right mecanum wheel speed, in rpm
            speed_w4 (int:[-1000, 1000]): rear-left mecanum wheel speed, in rpm
Returns:
None
"""
return self._send_cmd('chassis wheel w1 %d w2 %d w3 %d w4 %d' % (speed_w1, speed_w2, speed_w3, speed_w4))
@accepts((float, -5, 5), (float, -5, 5), (int, -1800, 1800), (float, 0, 3.5), (float, 0, 600))
def shift(self, x=0., y=0., yaw=0, speed_xy=0.5, speed_yaw=90.):
"""底盘相对位置控制
控制底盘运动当指定位置,坐标轴原点为当前位置
Args:
            x (float:[-5, 5]): distance to move along the x axis, in m
            y (float:[-5, 5]): distance to move along the y axis, in m
            yaw (int:[-1800, 1800]): rotation angle around the z axis, in °
            speed_xy (float:(0, 3.5]): movement speed in the xy plane, in m/s
            speed_yaw (float:(0, 600]): rotation speed around the z axis, in °/s
Returns:
None
"""
return self._send_cmd('chassis move x %f y %f z %d vxy %f vz %f' % (x, y, yaw, speed_xy, speed_yaw))
def get_all_speed(self):
"""底盘速度信息获取
获取底盘的速度信息
Args:
None
Returns:
(list) [
                speed_x (float): speed along the x axis, in m/s
                speed_y (float): speed along the y axis, in m/s
                speed_yaw (float): rotation speed around the z axis, in °/s
                speed_w1 (int): front-right mecanum wheel speed, in rpm
                speed_w2 (int): front-left mecanum wheel speed, in rpm
                speed_w3 (int): rear-right mecanum wheel speed, in rpm
                speed_w4 (int): rear-left mecanum wheel speed, in rpm
]
"""
response = self._send_query('chassis speed ?')
return self._process_response(response, (float, float, float, int, int, int, int))
def get_speed(self):
"""获取机器人运动速度
获取机器人整体的速度信息
Args:
None
Returns:
(list) [
                speed_x (float): speed along the x axis, in m/s
                speed_y (float): speed along the y axis, in m/s
                speed_yaw (float): rotation speed around the z axis, in °/s
]
"""
return self.get_all_speed()[:3]
def get_wheel_speed(self):
"""获取麦轮速度
获取麦轮的速度信息
Args:
None
Returns:
(list) [
                speed_w1 (int): front-right mecanum wheel speed, in rpm
                speed_w2 (int): front-left mecanum wheel speed, in rpm
                speed_w3 (int): rear-right mecanum wheel speed, in rpm
                speed_w4 (int): rear-left mecanum wheel speed, in rpm
]
"""
return self.get_all_speed()[3:]
def get_postion(self):
"""底盘位置信息获取
获取底盘的位置信息
上电时刻机器人所在的位置为坐标原点
Args:
None
Returns:
(list) [
                x (float): displacement along the x axis
                y (float): displacement along the y axis
                z (float): displacement along the z axis
]
"""
response = self._send_query('chassis position ?')
return self._process_response(response, float)
def get_attitude(self):
"""获取底盘姿态信息
查询底盘的姿态信息
Args:
None
        Returns: #TODO: confirm whether the return values are int or float
(list) [
                pitch (float): pitch axis angle, in °
                roll (float): roll axis angle, in °
                yaw (float): yaw axis angle, in °
]
"""
response = self._send_query('chassis attitude ?')
return self._process_response(response, float)
def get_status(self):
"""获取底盘状态信息
获取底盘状态信息
Args:
None
Returns:
(list) [
                static (bool): whether the chassis is static
                uphill (bool): whether it is going uphill
                downhill (bool): whether it is going downhill
                on_slope (bool): whether it is sliding down a slope
                pick_up (bool): whether it has been picked up
                slip (bool): whether it is slipping
                impact_x (bool): whether an impact is sensed on the x axis
                impact_y (bool): whether an impact is sensed on the y axis
                impact_z (bool): whether an impact is sensed on the z axis
                roll_over (bool): whether it has rolled over
                hill_static (bool): whether it is static on a slope
]
"""
response = self._send_query('chassis status ?')
return self._process_response(response, bool)
def set_push(self, pos_freq=None, atti_freq=None, status_freq=None):
"""底盘信息推送控制
打开/关闭底盘中相应属性的信息推送与频率设置
若已使用 Robot.push.start 激活推送接收器
可以直接从 Robot.chassis.[属性名] 获取推送信息
Args:
支持的频率 1, 5, 10, 20, 30, 50
频率为 0 则关闭该属性推送
若输入为 None 则不改变该属性的推送评论
pos_freq (int/None)
atti_freq (int/None)
status_freq (int/None)
Returns:
None
"""
if pos_freq == atti_freq == status_freq == None:
self._log.warn("set_push: got 3 None args." )
return False
else:
cmd = 'chassis push'
if pos_freq == None:
pass
elif pos_freq == 0:
cmd += ' position off'
elif pos_freq in (1, 5, 10, 20, 30, 50):
cmd += ' position on pfreq %d' %pos_freq
else:
self._log.error("set_push: 'pos_freq' should be an integer in (0, 1, 5, 10, 20, 30, 50), but got %r" %pos_freq)
if atti_freq == None:
pass
elif atti_freq == 0:
cmd += ' attitude off'
elif atti_freq in (1, 5, 10, 20, 30, 50):
cmd += ' attitude on afreq %d' %atti_freq
else:
self._log.error("set_push: 'atti_freq' should be an integer in (0, 1, 5, 10, 20, 30, 50), but got %r" %atti_freq)
if status_freq == None:
pass
elif status_freq == 0:
cmd += ' status off'
elif status_freq in (1, 5, 10, 20, 30, 50):
cmd += ' status on sfreq %d' %status_freq
else:
self._log.error("set_push: 'status_freq' should be an integer in (0, 1, 5, 10, 20, 30, 50), but got %r" %status_freq)
return self._send_cmd(cmd)
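# A minimal usage sketch of set_push (assuming a connected Robot instance named `robot`,
# which is not created in this module):
#   robot.chassis.set_push(pos_freq=10, atti_freq=None, status_freq=0)
# pushes position at 10 Hz, leaves the attitude push unchanged, and turns status pushes off.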
``` |
{
"source": "2350849683/-short_url",
"score": 2
} |
#### File: -short_url/app/__init__.py
```python
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def create_app():
app = Flask(__name__)
app.config.from_object(Config)
Config.init_app(app)
db.init_app(app)
from app.service import service
app.register_blueprint(service)
return app
``` |
{
"source": "23580/my-Ai-project",
"score": 3
} |
#### File: 23580/my-Ai-project/myfriend.py
```python
import speech_recognition as sr
import pyttsx3
import datetime
import pywhatkit
import wikipedia
import pyjokes
import pyaudio
import webbrowser
import os
listener = sr.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning sir ")
elif hour>=12 and hour<18:
speak("Good Afternoon sir")
else:
speak("Good Evening sir")
speak("I am Jarvis here. Please tell me how may I help you.")
def take_command():
    command = ''  # fallback so the return below is defined even if recognition fails
    try:
with sr.Microphone() as source:
print('listening...')
voice = listener.listen(source)
command = listener.recognize_google(voice)
command = command.lower()
if 'jarvis' in command:
command = command.replace('jarvis', '')
print(command)
except:
pass
return command
def run_alexa():
command = take_command()
print(command)
if 'play' in command:
song = command.replace('play', '')
speak('playing ' + song)
pywhatkit.playonyt(song)
elif 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
speak('Sir,Current time is ' + time)
elif 'who is' in command:
person = command.replace('who is', '')
info = wikipedia.summary(person, 1)
print(info)
speak(info)
elif 'date' in command:
speak('sorry sir, I have a headache')
elif 'are you single' in command:
speak('sorry sir! im in relationship with alexa')
elif 'joke' in command:
speak(pyjokes.get_joke())
elif 'open youtube' in command:
webbrowser.open("youtube.com")
speak('opening youtube for you sir')
elif 'open google' in command:
webbrowser.open("google.com")
speak('opening google for you sir')
elif 'open gmail' in command:
webbrowser.open("gmail.com")
speak('opening gmail for you sir')
elif 'feeling so sad' in command:
music_dir = 'C:\\Users\\91967\\Music\\Playlists\\'
songs = os.listdir(music_dir)
speak('playing songs in me to change your mood sir')
os.startfile(os.path.join(music_dir, songs[0]))
elif 'open vs code' in command:
codePath = "C:\\Users\\91967\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
speak('opening visual code edtior sir!')
elif 'hey jarvis' in command:
speak('yes sir im here ,tell me ')
else:
speak('Please say the command again.')
if __name__ == "__main__":
wishMe()
while True:
run_alexa()
``` |
{
"source": "2360673637/AIGames",
"score": 3
} |
#### File: Algorithm_1/nets/nets.py
```python
import os
import json
import keras
import random
from collections import deque
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Activation, Conv2D, Flatten, Dense, MaxPooling2D
import sys
sys.path.append('..')
from utils.utils import *
'''
# for model_2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
'''
'''
Function:
    Model 1: built with the Keras framework, for quick testing
'''
class model_1():
def __init__(self, options, with_pool=False):
if with_pool:
self.model = self.build_pool_model(options)
else:
self.model = self.build_model(options)
self.save_model_info()
    # Build the model
def build_model(self, options):
print('[INFO]: Start to build model_1 without pool...')
model = Sequential()
model.add(Conv2D(32, (8, 8), strides=(4, 4), padding='same', input_shape=options['input_shape']))
model.add(Activation('relu'))
model.add(Conv2D(64, (4, 4), strides=(2, 2), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (4, 4), strides=(1, 1), padding='same'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(options['num_actions']))
optim = Adam(lr=options['lr'])
model.compile(loss='mse', optimizer=optim)
return model
    # Build the model with pooling layers
def build_pool_model(self, options):
print('[INFO]: Start to build model_1 with pool...')
model = Sequential()
model.add(Conv2D(32, (8, 8), strides=(4, 4), padding='same', input_shape=options['input_shape']))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (4, 4), strides=(2, 2), padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(options['num_actions']))
optim = Adam(lr=options['lr'])
model.compile(loss='mse', optimizer=optim)
return model
    # Train the model
def train(self, agent, options, batch_idx=0):
Data_deque = deque()
height = options['input_shape'][0]
width = options['input_shape'][1]
num_channels = options['input_shape'][2]
        # Probability of the dino taking a random action
start_prob_jump = options['start_prob_jump']
end_prob_jump = options['end_prob_jump']
interval_prob = options['interval_prob']
prob = start_prob_jump
        # If resuming training, adjust the initial value of prob here
# prob = start_prob_jump - (start_prob_jump - end_prob_jump) / interval_prob * 20000
        # Training starts after num_operations steps have been performed
num_operations = options['num_operations']
# (1, 0) -> do nothing
# (0, 1) -> jump
actions = np.zeros(options['num_actions'])
img, score, is_dead = agent.perform_operation(actions)
size = (width, height)
img = preprocess_img(img, size=size, use_canny=options['use_canny'])
        # Input fed to the model during training
x_now = np.stack((img,) * num_channels, axis=2).reshape(1, height, width, num_channels)
x_init = x_now
loss_dict = {}
i = 0
while True:
i += 1
actions = np.zeros(options['num_actions'])
if random.random() <= prob:
                print('[INFO]: Dino acts randomly...')
action_idx = random.randint(0, len(actions)-1)
actions[action_idx] = 1
else:
                print('[INFO]: Dino action controlled by the network...')
Q_now = self.model.predict(x_now)
action_idx = np.argmax(Q_now)
actions[action_idx] = 1
img, score, is_dead = agent.perform_operation(actions)
img = preprocess_img(img, size=size, use_canny=options['use_canny'])
reward = self.score2reward(score, is_dead, actions)
img = img.reshape(1, height, width, 1)
x_next = np.append(img, x_now[:, :, :, :num_channels-1], axis=3)
Data_deque.append((x_now, action_idx, reward, x_next, is_dead, score))
if len(Data_deque) > options['data_memory']:
Data_deque.popleft()
# Data_deque = deque()
'''
if len(Data_deque) == num_operations:
agent.pause()
save_dict(Data_deque, options['data_dir'], 'Data_deque_%d.pkl' % batch_idx)
data_len = len(Data_deque) // options['num_sampling']
for i in range(options['num_sampling']):
batch_idx += 1
print('[INFO]: Start to train <Batch-%d>...' % batch_idx)
start = i * data_len
end = (i+1) * data_len
data = deque()
for j in range(start, end):
data.append(Data_deque[j])
loss = self.trainBatch(random.sample(data, options['batch_size']), options)
loss_dict[batch_idx] = loss
print('\t<Loss>: %.2f' % loss)
if batch_idx % options['save_interval'] == 0:
if not os.path.exists(options['savepath']):
os.mkdir(options['savepath'])
savename = options['savename'] + '_' + str(batch_idx) + '.h5'
self.model.save_weights(os.path.join(options['savepath'], savename))
save_dict(loss_dict, options['log_dir'], 'loss.pkl')
if batch_idx == options['max_batch']:
break
if batch_idx == options['max_batch']:
break
agent.resume()
'''
if i > num_operations:
# i = 0 if len(Data_deque) < 1000 else i
# i = 0
# agent.pause()
batch_idx += 1
print('[INFO]: Start to train <Batch-%d>...' % batch_idx)
loss = self.trainBatch(random.sample(Data_deque, options['batch_size']), options)
loss_dict[batch_idx] = loss
# print('\t<Loss>: %.3f' % loss)
print('\t<Loss>: %.3f, <Action>: %d' % (loss, action_idx))
if batch_idx % options['save_interval'] == 0:
if not os.path.exists(options['savepath']):
os.mkdir(options['savepath'])
savename = options['savename'] + '_' + str(batch_idx) + '.h5'
self.model.save_weights(os.path.join(options['savepath'], savename))
save_dict(loss_dict, options['log_dir'], 'loss.pkl')
if (len(Data_deque) == options['data_memory']) and (batch_idx % 5000 == 0):
save_dict(Data_deque, options['data_dir'], 'Data_deque_%d.pkl' % batch_idx)
if batch_idx == options['max_batch']:
break
# agent.resume()
x_now = x_init if is_dead else x_next
            # Gradually reduce the hand-set control so the network decides the actions on its own
if prob > end_prob_jump and i > num_operations:
prob -= (start_prob_jump - end_prob_jump) / interval_prob
savename = options['savename'] + '_' + str(batch_idx) + '.h5'
self.model.save_weights(os.path.join(options['savepath'], savename))
    # Train on one batch of data
def trainBatch(self, data_batch, options):
height = options['input_shape'][0]
width = options['input_shape'][1]
num_channels = options['input_shape'][2]
inputs = np.zeros((options['batch_size'], height, width, num_channels))
targets = np.zeros((inputs.shape[0], options['num_actions']))
for i in range(len(data_batch)):
x_now, action_idx, reward, x_next, is_dead, _ = data_batch[i]
inputs[i: i+1] = x_now
targets[i] = self.model.predict(x_now)
Q_next = self.model.predict(x_next)
if is_dead:
targets[i, action_idx] = reward
else:
targets[i, action_idx] = reward + options['rd_gamma'] * np.max(Q_next)
loss = self.model.train_on_batch(inputs, targets)
return loss
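    # The loop above fits the network toward the standard Q-learning target (this comment only
    # restates the logic above):
    #   target(s, a) = reward                                 if the episode ended (is_dead)
    #   target(s, a) = reward + rd_gamma * max_a' Q(s', a')   otherwise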
    # Convert score to reward
def score2reward(self, score, is_dead, actions):
'''
reward = 0.12
if actions[1] > actions[0]:
reward = 0.1
if is_dead:
reward = -100
'''
reward = 0.1
if is_dead:
reward = -1
return reward
    # Load weights
def load_weight(self, weight_path):
self.model.load_weights(weight_path)
    # Save the model architecture info
def save_model_info(self, savename='model.json'):
with open(savename, 'w') as f:
json.dump(self.model.to_json(), f)
def __repr__(self):
return '[Model]:\n%s' % self.model
def __str__(self):
return '[INFO]: model_1-CNN built by keras...'
'''
Function:
    Model 2: built with the PyTorch framework
'''
'''
class model_2(nn.Module):
def __init__(self, options):
super(model_2, self).__init__()
# in_channels, out_channels, kernel_size, stride
self.conv1 = nn.Conv2d(6, 32, 7, 2, padding=3)
self.conv2 = nn.Conv2d(32, 64, 3, 2, padding=1)
self.conv3 = nn.Conv2d(64, 64, 3, 2, padding=1)
self.fc1 = nn.Linear(64 * 4 * 8, 256)
self.fc2 = nn.Linear(256, options['num_actions'])
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.fc1(x.view(x.size(0), -1)))
x = self.fc2(x)
return x
'''
'''
Function:
    Model 3: built with the TensorFlow framework
'''
```
#### File: AITRexRush/Algorithm_1/train.py
```python
from nets.nets import *
from utils.api import *
from utils.utils import *
# for keras
# options = {
# "lr": 1e-4,
# "batch_size": 32,
# "max_batch": 150000,
# "input_shape": (96, 96, 4),
# "num_actions": 2,
# "savename": "autodino",
# "savepath": "./model",
# "log_dir": "logger",
# "data_dir": "data",
# "start_prob_jump": 0,
# "end_prob_jump": 0,
# "interval_prob": 1e5,
# "save_interval": 500,
# "num_operations": 200,
# "data_memory": 50000,
# "rd_gamma": 0.99,
# "use_canny": False,
# "num_sampling": 200,
# "game_url": 'chrome://dino'
# }
options = {
"lr": 1e-4,
"batch_size": 32,
"max_batch": 150000,
"input_shape": (96, 96, 4),
"num_actions": 2,
"savename": "autodino",
"savepath": "./model",
"log_dir": "logger",
"data_dir": "data",
"start_prob_jump": 0.1,
"end_prob_jump": 1e-4,
"interval_prob": 1e5,
"save_interval": 1500,
"num_operations": 200,
"data_memory": 50000,
"rd_gamma": 0.99,
"use_canny": False,
"num_sampling": 200,
"game_url": 'chrome://dino'
}
# for torch
'''
options = {
}
'''
# Training function
def train():
agent = DinoAgent(options['game_url'])
# model = model_1(options)
model = model_1(options, with_pool=True)
    # If you need to continue training the model, keep the two lines below and specify the correct model weight path and batch index; otherwise comment them out
model.load_weight('./model/autodino_100000.h5')
model.train(agent, options, batch_idx=100000)
if __name__ == '__main__':
train()
``` |
{
"source": "237085795/GraphEmbedding_annotion",
"score": 2
} |
#### File: GraphEmbedding_annotion/cluster/clustering.py
```python
import time
import numpy as np
import networkx as nx
from sklearn.cluster import KMeans
from collections import defaultdict
from sklearn.cluster import AgglomerativeClustering
# from Bio.Cluster import kcluster
# from Bio.Cluster import clustercentroids
# import
def kmeans_(vec_data, comms_num, map_id2node=None, use_set=False,n_init=10,init=None):
"""
    Return (comms, clf).
    :param vec_data:
    :param comms_num: number of cluster centroids
    :param map_id2node:
    :param use_set:
    :param n_init: run k-means n_init times and keep the best result
:return:
"""
if init is None :
clf = KMeans(n_clusters=comms_num,n_init=n_init)
else:
clf = KMeans(n_clusters=comms_num, n_init=n_init,init=init)
# start = time.time()
# clf.fit_predict()
clf.fit(vec_data)
# end = time.time()
# print("Kmeans running time: ", end - start)
# print(clf.inertia_)
cmus = defaultdict(list)
if map_id2node is not None:
for j in range(clf.labels_.shape[0]):
cmus[clf.labels_[j]].append(map_id2node[j])
else:
for j in range(clf.labels_.shape[0]):
cmus[clf.labels_[j]].append(j)
comms = []
for k, v in cmus.items():
if use_set:
comms.append(set(v))
else:
comms.append(v)
# cs = clf.cluster_centers_
# dists = np.zeros((vec_data.shape[0],cs.shape[0]))
# col = 0
# for c in cs:
# distances = np.sum(np.asarray(vec_data - c) ** 2,axis=1)
# # distances = np.sqrt(np.sum(np.asarray(vec_data - c) ** 2, axis=1))
# dists[:,col] = distances
# col += 1
# mindst = np.min(dists,axis=1)
# dstsum = np.sum(mindst)
# print(dstsum)
return comms ,clf
def kmeans_from_vec(vec_data, comms_num, map_id2node=None, use_set=False,n_init=10,init=None):
    # Instantiate the KMeans estimator.
if init is None :
clf = KMeans(n_clusters=comms_num,n_init=n_init)
else:
clf = KMeans(n_clusters=comms_num, n_init=n_init,init=init)
    # Run k-means clustering.
# start = time.time()
# clf.fit_predict()
clf.fit(vec_data)
# end = time.time()
# print("Kmeans running time: ", end - start)
# print(clf.inertia_)
cmus = defaultdict(list)
if map_id2node is not None:
for j in range(clf.labels_.shape[0]):
cmus[clf.labels_[j]].append(map_id2node[j])
else:
for j in range(clf.labels_.shape[0]):
cmus[clf.labels_[j]].append(j)
comms = []
for k, v in cmus.items():
if use_set:
comms.append(set(v))
else:
comms.append(v)
return comms
def ward_from_vec(vec_data, comms_num, map_id2node, use_set=False):
clf = AgglomerativeClustering(n_clusters=comms_num, linkage='ward')
# clf = AgglomerativeClustering(n_clusters=comms_num, linkage='euclidean')
# start = time.time()
s = clf.fit(vec_data)
# end = time.time()
# print("Kmeans running time: ", end - start)
cmus = defaultdict(list)
for j in range(clf.labels_.shape[0]):
cmus[clf.labels_[j]].append(map_id2node[j])
comms = []
for k, v in cmus.items():
if use_set:
comms.append(set(v))
else:
comms.append(v)
return comms
def power_kmeans(vec_data, comms_num, map_id2node, use_set=False):
clf = AgglomerativeClustering(n_clusters=comms_num, linkage='ward')
clf.fit(vec_data)
centers = np.zeros((comms_num,vec_data.shape[1]))
nodes_num = [0]*comms_num
for j in range(clf.labels_.shape[0]):
label = clf.labels_[j]
centers[label] += vec_data[j]
nodes_num[label] += 1
for la in range(comms_num):
centers[la] /= nodes_num[la]
clf2 = KMeans(n_clusters=comms_num,init=centers,n_init=1)
clf2.fit(vec_data)
print(clf2.inertia_)
cmus = defaultdict(list)
if map_id2node is not None:
for j in range(clf.labels_.shape[0]):
cmus[clf.labels_[j]].append(map_id2node[j])
else:
for j in range(clf.labels_.shape[0]):
cmus[clf.labels_[j]].append(j)
comms = []
for k, v in cmus.items():
if use_set:
comms.append(set(v))
else:
comms.append(v)
return comms
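# A minimal usage sketch of kmeans_from_vec (the random vectors below are illustrative; in this
# repo the inputs are normally node embeddings produced by a graph-embedding model):
if __name__ == '__main__':
    demo_vecs = np.random.rand(10, 4)  # 10 nodes with 4-dimensional embeddings
    demo_comms = kmeans_from_vec(demo_vecs, 2)  # split node ids 0..9 into 2 communities
    print(demo_comms)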
```
#### File: GraphEmbedding_annotion/demo/line_classification.py
```python
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
warnings.filterwarnings("ignore")
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
import networkx as nx
from sklearn.linear_model import LogisticRegression
from ge import LINE
from ge.classify import read_node_label, Classifier
from ge.utils import show
def evaluate_embeddings(embeddings):
X, Y = read_node_label('../data/flight/labels-usa-airports.txt', skip_head=True)
tr_frac = 0.8
print("Training classifier using {:.2f}% nodes...".format(
tr_frac * 100))
clf = Classifier(embeddings=embeddings, clf=LogisticRegression(solver='liblinear'))
return clf.split_train_evaluate(X, Y, tr_frac)
if __name__ == "__main__":
G = nx.read_edgelist('../data/flight/usa-airports.edgelist', create_using=nx.Graph(), nodetype=None)
# model = LINE(G, embedding_size=128, order='first')
# model = LINE(G, embedding_size=128, order='second')
model = LINE(G, embedding_size=128, order='all')
model.train(batch_size=1024, epochs=10, verbose=2)
embeddings = model.get_embeddings()
metric=evaluate_embeddings(embeddings)
print(metric)
show(metric)
```
#### File: GraphEmbedding_annotion/examples/deepwalk_Netxxx.py
```python
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from sklearn.manifold import TSNE
from cluster.clustering import kmeans_from_vec
from ge import DeepWalk,LINE,Node2Vec
from metric.modularity import cal_Q as Q
from metric.nmi import calc as NMI
from ge.classify import read_node_label
def NMI_Q(embeddings, num_coms):
emb_list=[]
for i in range(1,len(embeddings)+1):
emb_list.append(embeddings[str(i)])
# edgelist index start at 1 / emb_list index start at 0
predict = kmeans_from_vec(emb_list, num_coms) # index start by 0
for i in range(len(predict)):
predict[i] = [str(x) for x in predict[i]]
    # Data processing
X, Y = read_node_label('../data/Net_mu/Net0.8/community.dat', is_net=True)
comu_idx = {}
for i in range(len(X)):
comu_idx.setdefault(Y[i][0], [])
comu_idx[Y[i][0]].append(i)
# print(comu_idx)
real=[]
# print(real)
for key in comu_idx:
real.append(comu_idx[key])
for i in range(len(real)):
real[i]=[str(x) for x in real[i]]
# print(predict)
# print(real)
mni = NMI(predict, real)
# print(mni)
#
predict_=predict
for i in range(len(predict)):
predict_[i]=[str(int(x)+1) for x in predict[i]]
# predict index add 1
#
#
q=Q(predict_, G)
# print(q)
#
return mni,q
def plot(embeddings):
emb_list=[]
for i in range(1,len(embeddings)+1):
# print(i)
emb_list.append(embeddings[str(i)])
emb_list = np.array(emb_list)
model = TSNE(n_components=2)
node_pos = model.fit_transform(emb_list)
# print(node_pos)
X, Y = read_node_label('../data/Net300/community.dat',is_net=True)
# colors = ['c', 'b', 'g', 'r', 'm', 'y', 'k']
colors = ['r' ,'b','g','y']
#
color_idx = {}
for i in range(len(X)):
color_idx.setdefault(Y[i][0], [])
color_idx[Y[i][0]].append(i)
# plt.figure(dpi=300, figsize=(24, 12))
for c, idx in color_idx.items():
# print(type(idx))
# print(c,idx)
# print(node_pos[idx, 0],node_pos[idx, 1],node_pos[idx,2])
plt.scatter(node_pos[idx, 0], node_pos[idx, 1], label=c)
# ax.scatter(node_pos[idx, 0], node_pos[idx, 1], node_pos[idx, 2], label=c)
    plt.legend()  # show the legend
plt.show()
# plt.legend
# plt.show()
if __name__ == "__main__":
G = nx.read_edgelist('../data/Net_mu/Net0.8/network.dat', create_using=nx.Graph(), nodetype=None)
iter = 10
sum_mni = 0
sum_q = 0
for i in range(iter):
model = DeepWalk(G, embed_size=128,walk_length=25, num_walks=5, workers=1)
model.train(window_size=5, iter=5)
# model = LINE(G, embedding_size=128 ,order='all')
# model.train(batch_size=1024, epochs=50, verbose=2)
# model = Node2Vec(G, embed_size=128,walk_length=25, num_walks=5,
# p=0.25, q=2, workers=1, use_rejection_sampling=0)
# model.train(window_size = 5, iter = 3)
embeddings = model.get_embeddings()
        metric = NMI_Q(embeddings, 5)  # 5 communities
        sum_mni += metric[0]
        sum_q += metric[1]
print("mni:")
print(sum_mni / iter)
print("q:")
print(sum_q / iter)
# model = DeepWalk(G, embed_size=128,walk_length=25, num_walks=5, workers=1)
# model.train(window_size=5, iter=5)
# model = LINE(G, embedding_size=128, order='all')
# model.train(batch_size=1024, epochs=50, verbose=2)
# model = Node2Vec(G, embed_size=128,walk_length=25, num_walks=5,
# p=0.25, q=2, workers=1, use_rejection_sampling=0)
# model.train(window_size = 5, iter = 3)
# embeddings = model.get_embeddings()
# plot(embeddings)
# NMI_Q(embeddings, 4)
```
#### File: GraphEmbedding_annotion/metric/modularity.py
```python
import networkx as nx
import collections
# https://www.e-learn.cn/content/qita/2346706
# Expanded-form intuition: modularity is the fraction of edges falling inside communities minus the
# expected value of that fraction, where the expectation assumes a random network with the same
# community assignment.
# For a directed graph, Q = 1/M * sum over communities of (Avw - kv_out * kw_in / M) if (v, w) are in the same community.
# This is again an expectation: v has kv_out out-edges and w has kw_in in-edges, so under the random
# model the probability that an edge from v to w exists is kv_out * kw_in / M.
# Unlike the undirected case, here M = m (the number of edges); Avw likewise indicates whether an edge from v to w exists, or its weight.
# https://blog.csdn.net/marywbrown/article/details/62059231  (newer reference; remember to count internal edges twice)
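# For quick reference, the undirected formula actually computed by cal_Q2/cal_Q below (a
# restatement of the comments above, not new behaviour): with m edges, for each community c let
#   e_c = (2 * L_c) / (2 * m)   # L_c = edges inside c; each one is counted from both endpoints
#   a_c = d_c / (2 * m)         # d_c = total degree of the nodes in c
# and Q = sum_c (e_c - a_c ** 2).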
def cal_Q2(partition,G):
m = len(G.edges(None, False))
a = []
e = []
if len(partition) == 1:
return 0
for community in partition:
t = 0.0
for node in community:
t += len(list(G.neighbors(node)))
a.append(t/(2*m))
for community in partition:
community = list(community)
t = 0.0
for i in range(len(list(community))):
for j in range(len(list(community))):
if(G.has_edge(community[i], community[j])):
t += 1.0
e.append(t/(2*m))
q = 0.0
for ei,ai in zip(e,a):
        # By the expanded form: when e > 0 there exist distinct v, w with Avw = 1, and kv*kw/2m is always < 1, so the term is positive
        # when e = 0 the community consists of isolated nodes and does not take part in the computation
if ei > 0:
q += (ei - ai**2)
return q
def cal_Q(partition, G):
m = len(G.edges(None, False))
a = []
e = []
if len(partition) == 1:
return 0
for community in partition:
community = list(community)
t1 = 0.0
t2 = 0.0
# Nc^2 => Nc*k
community_nodes = set(community)
for node in community:
t1 += len(list(G.neighbors(node)))
for nei in G.neighbors(node):
if nei in community_nodes:
t2 += 1.0
        a.append(t1 / (2 * m))  # expected fraction, from the total degree of the community
        e.append(t2 / (2 * m))  # fraction of edges inside the community
q = 0.0
# print(partition)
# print(e,a)
for ei, ai in zip(e, a):
        # By the expanded form: when e > 0 there exist distinct v, w with Avw = 1, and kv*kw/2m is always < 1, so the term is positive
        # when e = 0 the community consists of isolated nodes and does not take part in the computation
if ei > 0:
q += (ei - ai ** 2)
return q
def cal_EQ(partition, G):
# kkm,rc = cal_KKM_RC(partition,G)
# return 1000-rc
m = G.number_of_edges()
a = []
e = []
if len(partition) == 1:
return 0
node_label_count = collections.defaultdict(int)
for community in partition:
for node in community:
node_label_count[node] +=1
for community in partition:
community = list(community)
t1 = 0.0
t2 = 0.0
# Nc^2 => Nc*k
community_nodes = set(community)
for node in community:
if node in G:
t1 += len(G[node]) / node_label_count[node]
for nei in G[node]:
if nei in community_nodes:
t2 += 1.0/(node_label_count[nei] * node_label_count[node])
a.append(t1 / (2 * m))
e.append(t2 / (2 * m))
q = 0.0
for ei, ai in zip(e, a):
        # By the expanded form: when e > 0 there exist distinct v, w with Avw = 1, and kv*kw/2m is always < 1, so the term is positive
        # when e = 0 the community consists of isolated nodes and does not take part in the computation
# print(ei,ai)
if ei > 0:
q += (ei - ai ** 2)
return q
def read_cmu(path):
cmus = []
# cnt =0
with open(path, 'r') as f:
for row in f.readlines():
row = row.strip()
r = row.split(" ",-1)
cmus.append(r)
# cnt += len(r)
# print(cnt)
return cmus
# qinze
def cal_Q3(communities, G):
'''
Reference: Community detection in graphs, equation (15)
:param G: networkx graph object
:return standard modularity
'''
Q = 0
m = nx.number_of_edges(G)
for com in communities:
dc = 0
lc = 0
for i in com:
dc += len(G[i])
for j in G[i]:
if j in com:
lc += 1
lc /= 2.0 # for undirected graph
Q += lc / m - (dc / (2.0 * m)) ** 2
return Q
def cal_EQ_qinze(cover, G):
vertex_community = collections.defaultdict(lambda: set())
for i, c in enumerate(cover):
for v in c:
vertex_community[v].add(i)
m = 0.0
for v in G.nodes():
neighbors = G.neighbors(v)
for n in neighbors:
if v > n:
m += 1
total = 0.0
for c in cover:
for i in c:
o_i = len(vertex_community[i])
k_i = len(G[i])
for j in c:
o_j = len(vertex_community[j])
k_j = len(G[j])
if i > j:
continue
t = 0.0
if j in G[i]:
t += 1.0 / (o_i * o_j)
t -= k_i * k_j / (2 * m * o_i * o_j)
if i == j:
total += t
else:
total += 2 * t
return round(total / (2 * m), 4)
def cal_EQ2(communities, G):
'''
Reference:Community detection in graphs, equation (39)
:param G: networkx graph object
:return overlapping modularity
'''
v_com = collections.defaultdict(lambda: set())
for i, com in enumerate(communities):
for v in com:
v_com[v].add(i)
EQ_1 = 0
EQ_2 = 0
m = nx.number_of_edges(G)
for com in communities:
s_2 = 0
for i in com:
k_i = len(G[i])
o_i = len(v_com[i])
s_i = 0
for j in G[i]:
if j in com:
o_j = len(v_com[j])
s_i += 1.0 / o_j
EQ_1 += s_i / o_i
s_2 += 1.0 * k_i / o_i
EQ_2 += s_2 ** 2
EQ = (EQ_1 - EQ_2 / (2.0 * m)) / (2.0 * m)
return EQ
def cal_KKM_RC(partitions,G):
comm_num = len(partitions)
intra_score = 0
inter_score = 0
n = G.number_of_nodes()
for partition in partitions:
partition = set(partition)
intra = 0
inter = 0
for node in partition:
# if node not in G:
# continue
for nei in G.neighbors(node):
if nei in partition:
intra += 1
else:
inter += 1
intra_score += intra / len(partition)
inter_score += inter / len(partition)
KKM = 2*(n - comm_num) - intra_score
RC = inter_score
# print(comm_num,n)
return KKM, RC
def cal_EQ_KKM_RC(partitions,G):
a = []
e = []
n = G.number_of_nodes()
m = G.number_of_edges()
intra_score = 0
inter_score = 0
comm_num = len(partitions)
node_label_count = collections.defaultdict(int)
for community in partitions:
for node in community:
node_label_count[node] +=1
for community in partitions:
t1 = 0.0
t2 = 0.0
community_nodes = set(community)
intra = 0
inter = 0
for node in community_nodes:
t1 += len(G[node]) / node_label_count[node]
for nei in G[node]:
if nei in community_nodes:
intra += 1
t2 += 1.0/(node_label_count[nei] * node_label_count[node])
else:
inter += 1
intra_score += intra / len(community_nodes)
inter_score += inter / len(community_nodes)
a.append(t1 / (2 * m))
e.append(t2 / (2 * m))
q = 0.0
for ei, ai in zip(e, a):
if ei > 0:
q += (ei - ai ** 2)
KKM = 2*(n - comm_num) - intra_score
RC = inter_score
return q,KKM,RC
# return 1000-RC,KKM,RC
if __name__ == '__main__':
cmus = read_cmu("./output/res.txt")
graph = nx.read_edgelist("./data/network.dat")
print(cal_Q(cmus,graph))
``` |
{
"source": "2390201w/fork_uofg_facemask_detector",
"score": 2
} |
#### File: dataset_scripts/tests/test_rename.py
```python
import os
import pytest
from dataset_scripts.rename_images import rename_image
@pytest.fixture
def get_test_images_dir():
"""Gets the path of the test_image_dir"""
test_images_dir = "dataset_scripts/tests/testDataset"
return test_images_dir
def test_incorrect_mask(get_test_images_dir): # pylint: disable=W0621
"""Renames images in the test_image_dir to incorrect mask naming scheme"""
rename_image(get_test_images_dir, "incorrect-mask-")
result = os.listdir(get_test_images_dir)
assert result[0] == "incorrect-mask-0001.jpg"
assert result[1] == "incorrect-mask-0002.jpg"
def test_with_mask(get_test_images_dir): # pylint: disable=W0621
"""Renames images in the test_image_dir to with mask naming scheme"""
rename_image(get_test_images_dir, "mask-")
result = os.listdir(get_test_images_dir)
assert result[0] == "mask-0001.jpg"
assert result[1] == "mask-0002.jpg"
def test_without_mask(get_test_images_dir): # pylint: disable=W0621
"""Renames images in the test_image_dir to no mask naming scheme"""
rename_image(get_test_images_dir, "no-mask-")
result = os.listdir(get_test_images_dir)
assert result[0] == "no-mask-0001.jpg"
assert result[1] == "no-mask-0002.jpg"
```
#### File: fork_uofg_facemask_detector/model_scripts/train_model.py
```python
import os
import sys
import argparse
from glob import glob
import numpy as np # pylint: disable=W0611
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from sklearn.model_selection import train_test_split
BATCH_SIZE = 5 # Num of Pics to Train each time
EPOCHS = 5 # Training Loop
INPUT_SHAPE = 224 # [96, 128, 160, 192, 224]
TYPE_DATASET = ['withMask', 'withoutMask', 'incorrectMask']
def preproc(filename, label):
"""preprocess image for training"""
image_byte = tf.io.read_file(filename)
image = tf.io.decode_image(image_byte, channels=3)
image_resize = tf.image.resize_with_pad(image, INPUT_SHAPE, INPUT_SHAPE)
image_norm = tf.cast(image_resize, tf.float32) / 255.0
label_onehot = tf.one_hot(label, len(
TYPE_DATASET), on_value=None, off_value=None)
return image_norm, label_onehot
def make_dataset(images, labels):
"""make training dataset"""
num = len(images)
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
dataset = dataset.shuffle(num).repeat()
# , num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(preproc)
# .prefetch(tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
return dataset
def prepare_dataset(dir_dataset: str, type_dataset: list, img_ext="*.jpg"):
"""prepare dataset for training"""
images = []
labels = []
for index, value in enumerate(type_dataset):
this_type_dataset = value
dir_mask = os.path.join(dir_dataset, this_type_dataset)
f_mask = glob(os.path.join(dir_mask, img_ext))
num_mask = len(f_mask)
label_mask = [index]*num_mask
images += f_mask
labels += label_mask
return images, labels
def train(dir_dataset, model_path):
"""training the model using MobileNetV2"""
images, labels = prepare_dataset(dir_dataset=dir_dataset,
type_dataset=TYPE_DATASET)
(training_images, testing_images, training_labels, testing_labels) = train_test_split(
images, labels, test_size=0.20, random_state=42)
dataset_train = make_dataset(training_images, training_labels)
dataset_vali = make_dataset(testing_images, testing_labels)
# Create base model using MobileNetV2
base_model = keras.applications.MobileNetV2(
input_shape=(INPUT_SHAPE, INPUT_SHAPE, 3), include_top=False)
model_output = Dense(len(TYPE_DATASET), activation='softmax')(
Dense(1024, activation='relu')(
GlobalAveragePooling2D()(base_model.output)
)
)
model = keras.Model(inputs=base_model.input, outputs=model_output)
# Freeze convolution
for layer in base_model.layers:
layer.trainable = False
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(dataset_train,
steps_per_epoch=len(training_images)//BATCH_SIZE,
epochs=EPOCHS,
validation_data=dataset_vali,
validation_steps=len(testing_images)//BATCH_SIZE,
verbose=1)
model.evaluate(dataset_vali, steps=len(
testing_images)//BATCH_SIZE, verbose=1)
model.save(model_path)
if __name__ == "__main__":
ap = argparse.ArgumentParser() # pylint: disable=C0103
ap.add_argument("-d", "--dataset", required=True,
help="path to input dataset")
ap.add_argument("-m", "--model", type=str,
default="mask_detector.model",
help="path to output face mask detector model")
args = vars(ap.parse_args()) # pylint: disable=C0103
current_dir = os.path.dirname(os.path.realpath(__file__)) # pylint: disable=C0103
parent_dir = os.path.dirname(current_dir) # pylint: disable=C0103
if args["dataset"] not in os.listdir(parent_dir):
print("Invalid dataset folder name\nPlease try again")
sys.exit(0)
dataset_dir = os.path.join(parent_dir, args["dataset"]) # pylint: disable=C0103
train(dataset_dir, args["model"])
``` |
{
"source": "2392863668/tensorFlow",
"score": 2
} |
#### File: py2tf/convert/builtin_functions.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import templates
class BuiltinFunctionTransformer(gast.NodeTransformer):
"""Transforms Print nodes to Call so they can be handled as functions."""
# TODO(mdan): Bring print_functions in here.
def _convert_len(self, node):
def template(args):
tf.shape(args)[0] # pylint:disable=undefined-variable,expression-not-assigned
new_call = templates.replace(template, args=node.args)[0].value
return new_call
# pylint:disable=invalid-name
def visit_Call(self, node):
self.generic_visit(node)
# TODO(mdan): This won't work if the function was hidden.
if isinstance(node.func, gast.Name) and node.func.id == 'len':
return self._convert_len(node)
return node
# pylint:enable=invalid-name
def transform(node):
transformer = BuiltinFunctionTransformer()
node = transformer.visit(node)
return node
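# A hypothetical usage sketch (the source string is illustrative; gast must be available):
#   import gast
#   tree = gast.parse("def f(x):\n    return len(x)")
#   tree = transform(tree)  # the len(x) call is rewritten to tf.shape(x)[0]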
```
#### File: keras/layers/local.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import activations
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.utils import conv_utils
class LocallyConnected1D(Layer):
"""Locally-connected layer for 1D inputs.
The `LocallyConnected1D` layer works similarly to
the `Conv1D` layer, except that weights are unshared,
that is, a different set of filters is applied at each different patch
of the input.
Example:
```python
# apply a unshared weight convolution 1d of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(LocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: Currently only supports `"valid"` (case-insensitive).
`"same"` may be supported in the future.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(LocallyConnected1D, self).__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid':
raise ValueError('Invalid border mode for LocallyConnected1D '
'(only "valid" is supported): ' + padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=3)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
input_dim = input_shape[2]
if input_dim is None:
raise ValueError('Axis 2 of input should be fully-defined. '
'Found shape:', input_shape)
output_length = conv_utils.conv_output_length(
input_shape[1], self.kernel_size[0], self.padding, self.strides[0])
self.kernel_shape = (output_length, self.kernel_size[0] * input_dim,
self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(output_length, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(ndim=3, axes={2: input_dim})
self.built = True
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
length = conv_utils.conv_output_length(input_shape[1], self.kernel_size[0],
self.padding, self.strides[0])
return tensor_shape.TensorShape([input_shape[0], length, self.filters])
def call(self, inputs):
output = K.local_conv1d(inputs, self.kernel, self.kernel_size, self.strides)
if self.use_bias:
output = K.bias_add(output, self.bias)
if self.activation is not None:
output = self.activation(output)
return output
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(LocallyConnected1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class LocallyConnected2D(Layer):
"""Locally-connected layer for 2D inputs.
The `LocallyConnected2D` layer works similarly
to the `Conv2D` layer, except that weights are unshared,
that is, a different set of filters is applied at each
different patch of the input.
Examples:
```python
# apply a 3x3 unshared weights convolution with 64 output filters on a
32x32 image
# with `data_format="channels_last"`:
model = Sequential()
model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
# now model.output_shape == (None, 30, 30, 64)
# notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64
parameters
# add a 3x3 unshared weights convolution on top, with 32 output filters:
model.add(LocallyConnected2D(32, (3, 3)))
# now model.output_shape == (None, 28, 28, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
    padding: Currently only supports `"valid"` (case-insensitive).
      `"same"` will be supported in the future.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(LocallyConnected2D, self).__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid':
raise ValueError('Invalid border mode for LocallyConnected2D '
'(only "valid" is supported): ' + padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=4)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
input_row, input_col = input_shape[1:-1]
input_filter = input_shape[3]
else:
input_row, input_col = input_shape[2:]
input_filter = input_shape[1]
if input_row is None or input_col is None:
raise ValueError('The spatial dimensions of the inputs to '
'a LocallyConnected2D layer '
'should be fully-defined, but layer received '
'the inputs shape ' + str(input_shape))
output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
self.padding, self.strides[0])
output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
self.padding, self.strides[1])
self.output_row = output_row
self.output_col = output_col
self.kernel_shape = (
output_row * output_col,
self.kernel_size[0] * self.kernel_size[1] * input_filter, self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(output_row, output_col, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
else:
self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
self.built = True
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], self.filters, rows, cols])
elif self.data_format == 'channels_last':
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, self.filters])
def call(self, inputs):
output = K.local_conv2d(inputs,
self.kernel,
self.kernel_size,
self.strides,
(self.output_row, self.output_col),
self.data_format)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(LocallyConnected2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
``` |
{
"source": "239-software/improved_RE2",
"score": 2
} |
#### File: improved_RE2/src/model.py
```python
import os
import math
import random
import torch
import torch.nn.functional as f
from tqdm import tqdm
from .network import Network
from .utils.metrics import registry as metrics
class Model:
prefix = 'checkpoint'
best_model_name = 'best.pt'
def __init__(self, args, state_dict=None):
self.args = args
# network
self.network = Network(args)
self.device = torch.cuda.current_device() if args.cuda else torch.device('cpu')
self.network.to(self.device)
# optimizer
self.params = list(filter(lambda x: x.requires_grad, self.network.parameters()))
self.opt = torch.optim.Adam(self.params, args.lr, betas=(args.beta1, args.beta2),
weight_decay=args.weight_decay)
# updates
self.updates = state_dict['updates'] if state_dict else 0
if state_dict:
new_state = set(self.network.state_dict().keys())
for k in list(state_dict['model'].keys()):
if k not in new_state:
del state_dict['model'][k]
self.network.load_state_dict(state_dict['model'])
self.opt.load_state_dict(state_dict['opt'])
def _update_schedule(self):
if self.args.lr_decay_rate < 1.:
args = self.args
t = self.updates
base_ratio = args.min_lr / args.lr
if t < args.lr_warmup_steps:
ratio = base_ratio + (1. - base_ratio) / max(1., args.lr_warmup_steps) * t
else:
ratio = max(base_ratio, args.lr_decay_rate ** math.floor((t - args.lr_warmup_steps) /
args.lr_decay_steps))
self.opt.param_groups[0]['lr'] = args.lr * ratio
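# For illustration, assuming hypothetical settings lr=3e-4, min_lr=1e-6,
# lr_warmup_steps=8000, lr_decay_rate=0.95 and lr_decay_steps=10000:
# the effective learning rate ramps linearly from min_lr up to lr over the
# first 8000 updates, is then multiplied by 0.95 after every further 10000
# updates, and is floored at min_lr (the base_ratio computed above).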
def update(self, batch):
self.network.train()
self.opt.zero_grad()
inputs, target = self.process_data(batch)
output = self.network(inputs)
#wrong_index = torch.max(torch.isnan(output[:,0]),0)[1].cpu().data.numpy()
#if wrong_index > 0:
# print(wrong_index)
# print(inputs['text1'][wrong_index,:])
# print(inputs['text2'][wrong_index,:])
# print(target[wrong_index])
summary = self.network.get_summary()
loss = self.get_loss(output, target)
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(self.params, self.args.grad_clipping)
assert grad_norm >= 0, 'encountered NaN in gradients.'
self.opt.step()
self._update_schedule()
self.updates += 1
stats = {
'updates': self.updates,
'loss': loss.item(),
'lr': self.opt.param_groups[0]['lr'],
'gnorm': grad_norm,
'summary': summary,
}
return stats
def evaluate(self, data):
self.network.eval()
targets = []
probabilities = []
predictions = []
losses = []
for batch in tqdm(data[:self.args.eval_subset], desc='evaluating', leave=False):
inputs, target = self.process_data(batch)
with torch.no_grad():
output = self.network(inputs)
loss = self.get_loss(output, target)
pred = torch.argmax(output, dim=1)
prob = torch.nn.functional.softmax(output, dim=1)
losses.append(loss.item())
targets.extend(target.tolist())
probabilities.extend(prob.tolist())
predictions.extend(pred.tolist())
outputs = {
'target': targets,
'prob': probabilities,
'pred': predictions,
'args': self.args,
}
stats = {
'updates': self.updates,
'loss': sum(losses[:-1]) / (len(losses) - 1) if len(losses) > 1 else sum(losses),
}
for metric in self.args.watch_metrics:
if metric not in stats: # multiple metrics could be computed by the same function
stats.update(metrics[metric](outputs))
assert 'score' not in stats, 'metric name collides with "score"'
eval_score = stats[self.args.metric]
stats['score'] = eval_score
return eval_score, stats # first value is for early stopping
def predict(self, batch):
self.network.eval()
inputs, _ = self.process_data(batch)
with torch.no_grad():
output = self.network(inputs)
output = torch.nn.functional.softmax(output, dim=1)
return output.tolist()
def process_data(self, batch):
text1 = torch.LongTensor(batch['text1']).to(self.device)
text2 = torch.LongTensor(batch['text2']).to(self.device)
mask1 = torch.ne(text1, self.args.padding).unsqueeze(2)
mask2 = torch.ne(text2, self.args.padding).unsqueeze(2)
inputs = {
'text1': text1,
'text2': text2,
'mask1': mask1,
'mask2': mask2,
}
if 'target' in batch:
target = torch.LongTensor(batch['target']).to(self.device)
return inputs, target
return inputs, None
@staticmethod
def get_loss(logits, target):
return f.cross_entropy(logits, target)
def save(self, states, name=None):
if name:
filename = os.path.join(self.args.summary_dir, name)
else:
filename = os.path.join(self.args.summary_dir, f'{self.prefix}_{self.updates}.pt')
params = {
'state_dict': {
'model': self.network.state_dict(),
'opt': self.opt.state_dict(),
'updates': self.updates,
},
'args': self.args,
'random_state': random.getstate(),
'torch_state': torch.random.get_rng_state()
}
params.update(states)
if self.args.cuda:
params['torch_cuda_state'] = torch.cuda.get_rng_state()
torch.save(params, filename)
@classmethod
def load(cls, file):
checkpoint = torch.load(file, map_location=(
lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
))
prev_args = checkpoint['args']
# update args
prev_args.output_dir = os.path.dirname(os.path.dirname(file))
prev_args.summary_dir = os.path.join(prev_args.output_dir, prev_args.name)
prev_args.cuda = prev_args.cuda and torch.cuda.is_available()
return cls(prev_args, state_dict=checkpoint['state_dict']), checkpoint
def num_parameters(self, exclude_embed=False):
num_params = sum(p.numel() for p in self.network.parameters() if p.requires_grad)
if exclude_embed:
num_params -= 0 if self.args.fix_embeddings else next(self.network.embedding.parameters()).numel()
return num_params
def set_embeddings(self, embeddings):
self.network.embedding.set_(embeddings)
``` |
{
"source": "23andMe/Lintly",
"score": 3
} |
#### File: Lintly/lintly/patch.py
```python
import logging
from autologging import logged, traced
import re
from cached_property import cached_property
FILE_NAME_LINE = re.compile(r'^\+\+\+ b/(?P<file_name>.+)')
RANGE_INFORMATION_LINE = re.compile(r'^@@ .+\+(?P<line_number>\d+),')
MODIFIED_LINE = re.compile(r'^\+(?!\+|\+)')
NOT_REMOVED_OR_NEWLINE_WARNING = re.compile(r'^[^-\\]')
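# How the regexes above are used by Patch below:
# - FILE_NAME_LINE captures the target path from "+++ b/<path>" file headers.
# - RANGE_INFORMATION_LINE captures the new-file starting line from "@@ ... +<line>,<count> @@" hunk headers.
# - MODIFIED_LINE matches added lines ("+...") while excluding the "+++" file header itself.
# - NOT_REMOVED_OR_NEWLINE_WARNING matches anything that is neither a removed line ("-")
#   nor a "\ No newline at end of file" marker, i.e. unchanged context lines in practice.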
logger = logging.getLogger(__name__)
@logged
@traced
class Patch(object):
"""
Parses the body of a diff and returns the lines that changed as well as their "position",
as outlined by GitHub here: https://developer.github.com/v3/pulls/comments/#create-a-comment
"""
def __init__(self, body=''):
self.body = body
@cached_property
def changed_lines(self):
"""
A list of dicts in the format:
{
'file_name': str,
'content': str,
'line_number': int,
'position': int
}
"""
lines = []
file_name = ''
line_number = 0
patch_position = -1
found_first_information_line = False
for i, content in enumerate(self.body.splitlines()):
range_information_match = RANGE_INFORMATION_LINE.search(content)
file_name_line_match = FILE_NAME_LINE.search(content)
if file_name_line_match:
file_name = file_name_line_match.group('file_name')
found_first_information_line = False
elif range_information_match:
line_number = int(range_information_match.group('line_number'))
if not found_first_information_line:
# This is the first information line (hunk header). Reset the counter so the line after this header ends up at position 1 once incremented below.
patch_position = 0
found_first_information_line = True
elif MODIFIED_LINE.search(content):
line = {
'file_name': file_name,
'content': content,
'line_number': line_number,
'position': patch_position
}
lines.append(line)
line_number += 1
elif NOT_REMOVED_OR_NEWLINE_WARNING.search(content) or content == '':
line_number += 1
patch_position += 1
return lines
def get_patch_position(self, file_name, line_number):
matching_lines = [line for line in self.changed_lines
if line['file_name'] == file_name and line['line_number'] == line_number]
if len(matching_lines) == 0:
return None
elif len(matching_lines) == 1:
return matching_lines[0]['position']
else:
logger.warning('Invalid patch or build.')
logger.warning('Multiple matching lines found for {file_name} on line '
'{line_number}'.format(file_name=file_name, line_number=line_number))
```
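The `Patch` class above converts file/line pairs into GitHub diff "positions". A minimal sketch of how it could be exercised, using a hypothetical one-hunk diff body (the file path, code lines, and import path are illustrative, the import assuming the package layout shown in the file header):

```python
from lintly.patch import Patch

# Hypothetical diff body for a pull request touching app/models.py.
DIFF = (
    "+++ b/app/models.py\n"
    "@@ -10,2 +10,3 @@\n"
    " def save(self):\n"
    "+    self.full_clean()\n"
    "     return super().save()\n"
)

patch = Patch(DIFF)
# The added call lands on line 11 of the new file; its diff position is 2,
# since the line just below the "@@" hunk header counts as position 1.
print(patch.get_patch_position('app/models.py', 11))  # -> 2
```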
#### File: Lintly/tests/test_parsers.py
```python
import abc
import os
import unittest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from lintly.parsers import PARSERS
class ParserTestCaseMixin(object):
"""Mixin for testing parsers.
Attributes
----------
linter_output : str
A linter output to test against.
"""
__metaclass__ = abc.ABCMeta
@property
@abc.abstractmethod
def parser(self):
"""lintly.parsers.BaseLintParser: a parser to test."""
pass
@property
@abc.abstractmethod
def linter_output_file_name(self):
"""str: linter output file name (relative to the `linters_output` directory)."""
pass
@property
@abc.abstractmethod
def expected_violations(self):
"""dict: a mapping between files and expected violations' attributes.
Examples
--------
{
'path/to/file': [{
'line': 42,
'column': 42,
'code': 'the-answer',
'message': 'don\'t panic & bring your towel'
}]
}
"""
pass
@staticmethod
def load_linter_output(file_name):
path = os.path.join(os.path.dirname(__file__), 'linters_output', file_name)
with open(path, 'r') as linter_output:
return linter_output.read()
def setUp(self):
self.linter_output = self.load_linter_output(self.linter_output_file_name)
def test_parse_violations(self):
violations = self.parser.parse_violations(self.linter_output)
# Checking files.
self.assertEqual(set(violations.keys()), set(self.expected_violations.keys()))
# Checking violations.
for file_path, violations in violations.items():
expected_violations = self.expected_violations[file_path]
# Checking violations count.
self.assertEqual(len(violations), len(expected_violations))
# Checking violations' attributes.
for violation_index, violation in enumerate(violations):
self.check_object_attrs(violation, expected_violations[violation_index])
def check_object_attrs(self, _object, expected_attrs):
for expected_attr, expected_value in expected_attrs.items():
self.assertTrue(hasattr(_object, expected_attr))
if expected_attr == 'message':
self.check_violation_message(_object, expected_attrs)
continue
self.assertEqual(getattr(_object, expected_attr), expected_value)
def check_violation_message(self, violation, expected_attrs):
# Override this for linters where the PR message gets augmented with extra info.
# By default, perform a simple equality check.
self.assertEqual(violation.message, expected_attrs['message'])
class IgnoreInstructionsMixin(ParserTestCaseMixin):
def check_violation_message(self, violation, expected_attrs):
self.assertTrue(violation.message)
self.assertIn(expected_attrs['message'], violation.message)
self.assertIn(self.parser.ignore_instructions, violation.message)
class IgnoreInstructionsWithRuleIdsMixin(ParserTestCaseMixin):
def check_violation_message(self, violation, expected_attrs):
self.assertTrue(violation.message)
self.assertIn(expected_attrs['message'], violation.message)
ignore_instructions_fmt = self.parser.ignore_instructions_format
ignore_instructions = ignore_instructions_fmt.format(rule_id=expected_attrs['code'])
self.assertIn(ignore_instructions, violation.message)
class Flake8ParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS['flake8']
linter_output_file_name = 'flake8.txt'
expected_violations = {
'lintly/parsers.py': [
{'line': 80, 'column': 5, 'code': 'E303', 'message': 'too many blank lines (3)'},
{'line': 216, 'column': 1, 'code': 'W391', 'message': 'blank line at end of file'}
],
'lintly/violations.py': [
{'line': 1, 'column': 1, 'code': 'F401', 'message': '\'itertools\' imported but unused'},
{'line': 20, 'column': 5, 'code': 'C901', 'message': '\'Violation.complex_method\' is too complex (11)'}
]
}
class BanditJSONParserTestCase(IgnoreInstructionsMixin, unittest.TestCase):
parser = PARSERS['bandit-json']
linter_output_file_name = 'bandit-json.txt'
expected_violations = {
'build/lib/lintly/formatters.py': [
{'line': 14, 'column': 0, 'code': 'B701 (jinja2_autoescape_false)',
'message': ('Using jinja2 templates with autoescape=False is dangerous and can lead to XSS. '
'Use autoescape=True or use the select_autoescape function to mitigate XSS vulnerabilities.')}
],
'lintly/formatters.py': [
{'line': 14, 'column': 0, 'code': 'B701 (jinja2_autoescape_false)',
'message': ('Using jinja2 templates with autoescape=False is dangerous and can lead to XSS. '
'Use autoescape=True or use the select_autoescape function to mitigate XSS vulnerabilities.')}
],
'tests/test_builds.py': [
{'line': 48, 'column': 0, 'code': 'B101 (assert_used)',
'message': ('Use of assert detected. '
'The enclosed code will be removed when compiling to optimised byte code.')}
],
'tests/test_cli.py': [
{'line': 13, 'column': 0, 'code': 'B101 (assert_used)',
'message': ('Use of assert detected. '
'The enclosed code will be removed when compiling to optimised byte code.')},
{'line': 14, 'column': 0, 'code': 'B101 (assert_used)',
'message': ('Use of assert detected. '
'The enclosed code will be removed when compiling to optimised byte code.')},
{'line': 15, 'column': 0, 'code': 'B101 (assert_used)',
'message': ('Use of assert detected. '
'The enclosed code will be removed when compiling to optimised byte code.')}
]
}
class GitleaksParserTestCase(IgnoreInstructionsMixin, unittest.TestCase):
parser = PARSERS['gitleaks']
linter_output_file_name = 'gitleaks.json'
expected_violations = {
'relative/path/to/output': [
{'line': 5, 'column': 0, 'code': 'AKIAIO5FODNN7EXAMPLE',
'message': 'AWS Access Key'}
],
'relative/path/to/output2': [
{'line': 2, 'column': 0, 'code': '-----BEGIN PRIVATE KEY-----',
'message': 'Asymmetric Private Key'}
]
}
class HadolintParserTestCase(IgnoreInstructionsWithRuleIdsMixin, unittest.TestCase):
parser = PARSERS['hadolint']
linter_output_file_name = 'hadolint.json'
expected_violations = {
'relative/path/to/file1': [
{'line': 12, 'column': 1, 'code': 'DL3015',
'message': 'Avoid additional packages by specifying `--no-install-recommends`'}
],
'relative/path/to/file2': [
{'line': 19, 'column': 1, 'code': 'DL3020',
'message': 'Use COPY instead of ADD for files and folders'}
]
}
class TerrascanParserTestCase(IgnoreInstructionsWithRuleIdsMixin, unittest.TestCase):
parser = PARSERS['terrascan']
linter_output_file_name = 'terrascan.json'
expected_violations = {
'main.tf': [
{'line': 6, 'column': 0, 'code': 'AC_AWS_0483',
'message': 'Lambda does not use KMS CMK key to protect environment variables.'}
],
'api_gateway_config.tf': [
{'line': 15, 'column': 0, 'code': 'AC_AWS_0014',
'message': 'Enable AWS CloudWatch Logs for APIs'}
]
}
class TerrascanParserNoErrorsTestCase(IgnoreInstructionsWithRuleIdsMixin, unittest.TestCase):
parser = PARSERS['terrascan']
linter_output_file_name = 'terrascan_no_errors.json'
expected_violations = {}
class TrivyParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS['trivy']
linter_output_file_name = 'trivy.json'
expected_violations = {
'relative/path/to/file1': [
{'line': 0, 'column': 0, 'code': 'RUSTSEC-2019-0001 (Uncontrolled recursion in HTML serialization)',
'message': 'Affected versions of this crate did use recursion for serialization of HTML\nDOM trees.'}
],
'relative/path/to/file2': [
{'line': 0, 'column': 0, 'code': 'CVE-2019-19844 (Django: crafted email address allows account takeover)',
'message': 'Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover.'}
]
}
class TfsecParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS['tfsec']
linter_output_file_name = 'tfsec.json'
expected_violations = {
'relative/path/to/file1': [
{'line': 8, 'column': 0, 'code': 'AWS004 (Use of plain HTTP.)',
'message': 'Resource aws_alb_listener.my-alb-listener uses plain HTTP instead of HTTPS.'}
],
'relative/path/to/file2': [
{'line': 21, 'column': 0, 'code': 'AZU003 (Unencrypted managed disk.)',
'message': 'Resource azurerm_managed_disk.source defines an unencrypted managed disk.'}
]
}
class SemgrepParserTestCase(IgnoreInstructionsMixin, unittest.TestCase):
parser = PARSERS['semgrep']
linter_output_file_name = 'semgrep.json'
expected_violations = {
'relative/path/to/file1': [
{'line': 115, 'column': 20, 'code': 'python.lang.security.deserialization.pickle.avoid-pickle',
'message': 'Avoid using `pickle`, which is known to lead to code execution vulnerabilities.'}
],
'relative/path/to/file2': [
{'line': 121, 'column': 5, 'code': 'python.jinja2.security.audit.autoescape-disabled.autoescape-disabled',
'message': 'Detected a Jinja2 environment without autoescaping.'}
]
}
class PylintJSONParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS['pylint-json']
linter_output_file_name = 'pylint-json.txt'
expected_violations = {
'lintly/patch.py': [
{'line': 22, 'column': 0, 'code': 'W0511 (fixme)', 'message': 'TODO: Cache this'},
{'line': 1, 'column': 0, 'code': 'C0111 (missing-docstring)', 'message': 'Missing module docstring'}
],
'lintly/config.py': [
{'line': 6, 'column': 0, 'code': 'C0301 (line-too-long)', 'message': 'Line too long (112/100)'},
{'line': 1, 'column': 0, 'code': 'C0111 (missing-docstring)', 'message': 'Missing module docstring'},
{
'line': 10, 'column': 8, 'code': 'C0103 (invalid-name)',
'message': 'Attribute name "ci" doesn\'t conform to snake_case naming style'
}
]
}
def test_pylint_no_errors(self):
violations = self.parser.parse_violations('')
self.assertEqual(violations, dict())
violations = self.parser.parse_violations('No config file found, using default configuration\n')
self.assertEqual(violations, dict())
class ESLintParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS['eslint']
linter_output_file_name = 'eslint.txt'
expected_violations = {
'static/file1.js': [
{'line': 1, 'column': 1, 'code': 'no-undef', 'message': '\'$\' is not defined'},
{
'line': 1, 'column': 11, 'code': 'space-before-function-paren',
'message': 'Missing space before function parentheses'
},
{'line': 2, 'column': 1, 'code': 'indent', 'message': 'Expected indentation of 2 spaces but found 4'}
],
'static/file2.js': [
{'line': 3, 'column': 1, 'code': 'indent', 'message': 'Expected indentation of 4 spaces but found 8'},
{'line': 4, 'column': 68, 'code': 'semi', 'message': 'Extra semicolon'}
]
}
@patch('lintly.parsers.ESLintParser._get_working_dir', return_value='/Users/grant/project')
def test_parse_violations(self, _get_working_dir_mock):
super(ESLintParserTestCase, self).test_parse_violations()
class CheckmakeParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS['checkmake']
linter_output_file_name = "checkmake.txt"
expected_violations = {
'path/to/Makefile': [
{'line': 45, 'column': 0, 'code': '', 'message': 'minphony:Missing required phony target "all"'},
{'line': 45, 'column': 0, 'code': '', 'message': 'minphony:Missing required phony target "clean"'},
{'line': 45, 'column': 0, 'code': '', 'message': 'minphony:Missing required phony target "test"'},
]
}
@patch('lintly.parsers.LineRegexParser._get_working_dir', return_value='/')
@patch.dict(os.environ, {"LINTLY_FILE_OVERRIDE": "/path/to/Makefile"})
def test_parse_violations(self, _get_working_dir_mock):
super(CheckmakeParserTestCase, self).test_parse_violations()
class StylelintParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS['stylelint']
linter_output_file_name = 'stylelint.txt'
expected_violations = {
'lintly/static/sass/file1.scss': [
{'line': 13, 'column': 1, 'code': 'max-empty-lines', 'message': 'Expected no more than 1 empty line'}
],
'lintly/static/sass/file2.scss': [
{
'line': 11, 'column': 1, 'code': 'at-rule-empty-line-before',
'message': 'Expected empty line before at-rule'
},
{'line': 27, 'column': 1, 'code': 'max-empty-lines', 'message': 'Expected no more than 1 empty line'},
{'line': 31, 'column': 22, 'code': 'number-leading-zero', 'message': 'Expected a leading zero'}
]
}
class BlackParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS["black"]
linter_output_file_name = "black.txt"
# Messages contain diff blocks and are long, so they are abbreviated here and will be tested with assertIn.
expected_violations = {
"lint_test/test_another.py": [
{
"line": 1, "column": 0, "code": "`black`",
"message": ("-ANOTHER_CONST = 'another single quote'\n+ANOTHER_CONST = \"another single quote\""),
},
],
"lint_test/test_black.py": [
{"line": 1, "column": 0, "code": "`black`", "message": "+from . import (\n+ very_long_line_here,\n"},
{
"line": 27, "column": 0, "code": "`black`",
"message": ("-SOME_CONST = 'black is also very particular about double quotes'\n"
"+SOME_CONST = \"black is also very particular about double quotes\""),
},
],
}
def check_object_attrs(self, _object, expected_attrs):
# Patch check_object_attrs to accept abbreviated messages.
for expected_attr, expected_value in expected_attrs.items():
self.assertTrue(hasattr(_object, expected_attr))
if expected_attr == "message":
self.assertIn(expected_value, getattr(_object, expected_attr))
else:
self.assertEqual(getattr(_object, expected_attr), expected_value)
class CfnLintParserTestCase(ParserTestCaseMixin, unittest.TestCase):
parser = PARSERS['cfn-lint']
linter_output_file_name = 'cfn-lint.txt'
expected_violations = {
"templates/template.yaml": [
{'line': 2, 'column': 9, 'code': 'W2001', 'message': 'Parameter UnusedParameter not used.'},
{'line': 5, 'column': 9, 'code': 'W2001', 'message': 'Parameter AnotherOne not used.'}
],
"templates/template2.yaml": [
{'line': 7, 'column': 9, 'code': 'E1012',
'message': 'Ref PrincipalOrgID not found as a resource or parameter'},
]
}
class CfnNagParserTestCase(IgnoreInstructionsWithRuleIdsMixin, unittest.TestCase):
parser = PARSERS['cfn-nag']
linter_output_file_name = 'cfn-nag.json'
expected_violations = {
"cloudformation/perfect-stack.yaml": [],
"cloudformation/problem-stack.yaml": [
{'line': 24, 'column': 0, 'code': 'F3',
'message': 'IAM role should not allow * action on its permissions policy'},
{'line': 150, 'column': 0, 'code': 'F3', 'message':
'IAM role should not allow * action on its permissions policy'},
{'line': 50, 'column': 0, 'code': 'W35',
'message': 'S3 Bucket should have access logging configured'}
],
"cloudformation/warning-stack.yaml": [
{'line': 320, 'column': 0, 'code': 'W42',
'message': 'Security Groups ingress with an ipProtocol of -1 found '},
]
}
``` |
{
"source": "23andMe/saleor",
"score": 2
} |
#### File: saleor/core/exceptions.py
```python
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, List, Optional, Sequence, Union
from ..checkout.error_codes import CheckoutErrorCode
if TYPE_CHECKING:
from ..checkout.models import CheckoutLine
from ..order.models import OrderLine
from ..product.models import ProductVariant
@dataclass
class InsufficientStockData:
variant: Optional["ProductVariant"] = None
checkout_line: Optional["CheckoutLine"] = None
order_line: Optional["OrderLine"] = None
warehouse_pk: Union[str, int, None] = None
available_quantity: Optional[int] = None
class InsufficientStock(Exception):
def __init__(self, items: List[InsufficientStockData]):
details = [str(item.variant or item.order_line) for item in items]
super().__init__(f"Insufficient stock for {', '.join(details)}")
self.items = items
self.code = CheckoutErrorCode.INSUFFICIENT_STOCK
class AllocationError(Exception):
def __init__(self, order_lines):
lines = [str(line) for line in order_lines]
super().__init__(f"Unable to deallocate stock for lines {', '.join(lines)}.")
self.order_lines = order_lines
class PreorderAllocationError(Exception):
def __init__(self, order_line):
super().__init__(f"Unable to allocate in stock for line {str(order_line)}.")
self.order_line = order_line
class ReadOnlyException(Exception):
def __init__(self, msg=None):
if msg is None:
msg = "API runs in read-only mode"
super().__init__(msg)
class ProductNotPublished(Exception):
def __init__(self, context=None):
super().__init__("Can't add unpublished product.")
self.context = context
self.code = CheckoutErrorCode.PRODUCT_NOT_PUBLISHED
class PermissionDenied(Exception):
def __init__(self, message=None, *, permissions: Sequence[Enum] = None):
if not message:
if permissions:
permission_list = ", ".join(p.name for p in permissions)
message = (
f"You need one of the following permissions: {permission_list}"
)
else:
message = "You do not have permission to perform this action"
super().__init__(message)
self.permissions = permissions
class GiftCardNotApplicable(Exception):
def __init__(self, message: str):
super().__init__(message)
self.message = message
self.code = CheckoutErrorCode.GIFT_CARD_NOT_APPLICABLE.value
```
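The exception classes above are plain carriers of structured error data. A minimal sketch of how `InsufficientStock` is meant to be raised and inspected; the `reserve` helper and the string stand-in for a variant are hypothetical, not part of Saleor:

```python
from saleor.core.exceptions import InsufficientStock, InsufficientStockData

def reserve(variant, requested, available):
    # Hypothetical helper: report a shortage with one InsufficientStockData
    # entry per failing line, which is how callers are expected to consume it.
    if requested > available:
        raise InsufficientStock(
            [InsufficientStockData(variant=variant, available_quantity=available)]
        )

try:
    reserve("SKU-123", requested=5, available=2)
except InsufficientStock as exc:
    for item in exc.items:
        print(item.variant, item.available_quantity)  # SKU-123 2
```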
#### File: checkout/mutations/checkout_add_promo_code.py
```python
import graphene
from django.core.exceptions import ValidationError
from ....checkout.checkout_cleaner import validate_checkout_email
from ....checkout.error_codes import CheckoutErrorCode
from ....checkout.fetch import (
fetch_checkout_info,
fetch_checkout_lines,
update_delivery_method_lists_for_checkout_info,
)
from ....checkout.utils import add_promo_code_to_checkout
from ...core.descriptions import DEPRECATED_IN_3X_INPUT
from ...core.mutations import BaseMutation
from ...core.scalars import UUID
from ...core.types import CheckoutError
from ...core.validators import validate_one_of_args_is_in_mutation
from ..types import Checkout
from .utils import get_checkout_by_token, update_checkout_shipping_method_if_invalid
class CheckoutAddPromoCode(BaseMutation):
checkout = graphene.Field(
Checkout, description="The checkout with the added gift card or voucher."
)
class Arguments:
checkout_id = graphene.ID(
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use token instead."
),
required=False,
)
token = UUID(description="Checkout token.", required=False)
promo_code = graphene.String(
description="Gift card code or voucher code.", required=True
)
class Meta:
description = "Adds a gift card or a voucher to a checkout."
error_type_class = CheckoutError
error_type_field = "checkout_errors"
@classmethod
def perform_mutation(cls, _root, info, promo_code, checkout_id=None, token=None):
# DEPRECATED
validate_one_of_args_is_in_mutation(
CheckoutErrorCode, "checkout_id", checkout_id, "token", token
)
if token:
checkout = get_checkout_by_token(token)
# DEPRECATED
else:
checkout = cls.get_node_or_error(
info, checkout_id or token, only_type=Checkout, field="checkout_id"
)
validate_checkout_email(checkout)
manager = info.context.plugins
discounts = info.context.discounts
lines, unavailable_variant_pks = fetch_checkout_lines(checkout)
if unavailable_variant_pks:
not_available_variants_ids = {
graphene.Node.to_global_id("ProductVariant", pk)
for pk in unavailable_variant_pks
}
raise ValidationError(
{
"lines": ValidationError(
"Some of the checkout lines variants are unavailable.",
code=CheckoutErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL.value,
params={"variants": not_available_variants_ids},
)
}
)
shipping_channel_listings = checkout.channel.shipping_method_listings.all()
checkout_info = fetch_checkout_info(
checkout, lines, discounts, manager, shipping_channel_listings
)
add_promo_code_to_checkout(
manager,
checkout_info,
lines,
promo_code,
discounts,
)
update_delivery_method_lists_for_checkout_info(
checkout_info,
checkout_info.checkout.shipping_method,
checkout_info.checkout.collection_point,
checkout_info.shipping_address,
lines,
discounts,
manager,
shipping_channel_listings,
)
update_checkout_shipping_method_if_invalid(checkout_info, lines)
manager.checkout_updated(checkout)
return CheckoutAddPromoCode(checkout=checkout)
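# For reference, a hypothetical GraphQL call exercising this mutation, assuming it
# is registered under the conventional camelCase field name; the token and promo
# code values are placeholders:
#
#   mutation {
#     checkoutAddPromoCode(token: "3f2c...-uuid", promoCode: "SUMMER10") {
#       checkout { id }
#       errors { field code }
#     }
#   }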
``` |
{
"source": "23andMe/seqseek",
"score": 3
} |
#### File: seqseek/seqseek/format_fasta.py
```python
from __future__ import print_function
import argparse
def run(path):
print("Formatting %s" % path)
with open(path) as fasta:
header = ''
first_line = fasta.readline()
if not first_line.startswith('>'):
header = '> ' + path.split('/')[-1].split('.')[0] + '\n'
first_line = first_line.replace('\n', '')
clean = fasta.read().replace('\n', '')
with open(path + '.seqseek', 'w') as formatted:
formatted.write(header)
formatted.write(first_line)
formatted.write(clean)
with open(path + '.seqseek') as done:
done.readline()
sequence = done.read()
print("Length is %d" % len(sequence))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("fasta_path")
args = parser.parse_args()
run(args.fasta_path)
``` |
{
"source": "23ccozad/hurricane-wind-speed-cnn",
"score": 3
} |
#### File: 23ccozad/hurricane-wind-speed-cnn/model.py
```python
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
import gc
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
from keras import models
from keras import layers
from keras import metrics
from keras.callbacks import EarlyStopping
def read_and_prepare_data(validation_mode, k=5, augment=True):
if validation_mode == 'k_fold':
# Read in data from files
images = np.load('images.npy')
labels = np.load('labels.npy')
# Split the image and label datasets into k number of subsets
folded_images = []
folded_labels = []
for i in range(k):
start = int((i / k) * len(images))
end = int(((i + 1) / k) * len(images))
folded_images.append(images[start:end])
folded_labels.append(labels[start:end])
# Generate augmented images for each fold
folded_augmented_images = []
folded_augmented_labels = []
for i in range(k):
if augment:
print('\nAugmenting Fold ' + str(i + 1) + ' of ' + str(k))
augmented_images, augmented_labels = augment_images(folded_images[i], folded_labels[i])
folded_augmented_images.append(augmented_images)
folded_augmented_labels.append(augmented_labels)
# Combine the folds into sets for each iteration of the model and standardize the data
train_images = []
train_labels = []
test_images = []
test_labels = []
for i in range(k):
train_images.append(np.concatenate(folded_images[:i] + folded_images[(i+1):]))
train_labels.append(np.concatenate(folded_labels[:i] + folded_labels[(i+1):]))
if augment:
train_images[i] = np.concatenate(([train_images[i]] + folded_augmented_images[:i] + folded_augmented_images[(i + 1):]))
train_labels[i] = np.concatenate(([train_labels[i]] + folded_augmented_labels[:i] + folded_augmented_labels[(i + 1):]))
test_images.append(folded_images[i])
test_labels.append(folded_labels[i])
train_images[i], test_images[i] = standardize_data(train_images[i], test_images[i])
return train_images, train_labels, test_images, test_labels
def augment_images(images, labels):
# Create generators to augment images
from keras.preprocessing import image
flip_generator = image.ImageDataGenerator(
horizontal_flip=True,
vertical_flip=True
)
rotate_generator = image.ImageDataGenerator(
rotation_range=360,
fill_mode='nearest'
)
# Accumulate augmented images and labels
augmented_images = []
augmented_labels = []
# Loop each images in the set to augment
for i in range(len(images)):
# Reshape image for generator
image = np.reshape(images[i], (1, images[i].shape[0], images[i].shape[1], 1))
label = labels[i]
# Reset the count of augmented images created from this source image to zero
num_new_images = 0
# Generate 2 new images if the image is of a tropical cyclone between 50 and 75 knots
if 50 < label < 75:
for batch in flip_generator.flow(image, batch_size=1):
gc.collect()
new_image = np.reshape(batch[0], (batch[0].shape[0], batch[0].shape[1], 1))
augmented_images.append(new_image)
augmented_labels.append(label)
num_new_images += 1
if num_new_images == 2:
break
# Generate 6 new images if the image is of a tropical cyclone between 75 and 100 knots
elif 75 < label < 100:
for batch in rotate_generator.flow(image, batch_size=1):
gc.collect()
new_image = np.reshape(batch[0], (batch[0].shape[0], batch[0].shape[1], 1))
augmented_images.append(new_image)
augmented_labels.append(label)
num_new_images += 1
if num_new_images == 6:
break
# Generate 12 new images if the image is of a tropical cyclone greater than or equal to 100 knots
elif 100 <= label:
for batch in rotate_generator.flow(image, batch_size=1):
gc.collect()
new_image = np.reshape(batch[0], (batch[0].shape[0], batch[0].shape[1], 1))
augmented_images.append(new_image)
augmented_labels.append(label)
num_new_images += 1
if num_new_images == 12:
break
print_progress('Augmenting Images', i + 1, len(images))
# Convert lists of images/labels into numpy arrays
augmented_images = np.array(augmented_images)
augmented_labels = np.array(augmented_labels)
return augmented_images, augmented_labels
def build_model():
# Build network architecture
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(50, 50, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation=None))
# Configure model optimization
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[metrics.MeanAbsoluteError(), metrics.RootMeanSquaredError()])
return model
def train_model(model, train_images, train_labels, test_images, test_labels, show_performance_by_epoch=False):
# Run model and get metrics for each epoch
performance_log = model.fit(
train_images,
train_labels,
callbacks=[EarlyStopping(monitor='val_mean_absolute_error', patience=5, restore_best_weights=True)],
epochs=100,
batch_size=64,
validation_data=(test_images, test_labels))
if show_performance_by_epoch:
performance_by_epoch(performance_log)
return model
def performance_by_epoch(performance_log):
# Get metrics for each epoch after model finishes training
train_loss = performance_log.history['loss']
test_loss = performance_log.history['val_loss']
train_mae = performance_log.history['mean_absolute_error']
test_mae = performance_log.history['val_mean_absolute_error']
epochs = range(1, len(train_loss) + 1)
# Build a dataframe storing epoch metrics
performance_df = pd.DataFrame(columns=['epoch', 'train_or_test', 'loss_or_mae', 'value'])
for i in range(len(train_loss)):
new_row = {'epoch': epochs[i], 'train_or_test': 'train', 'loss_or_mae': 'loss', 'value': train_loss[i]}
performance_df = performance_df.append(new_row, ignore_index=True)
new_row = {'epoch': epochs[i], 'train_or_test': 'test', 'loss_or_mae': 'loss', 'value': test_loss[i]}
performance_df = performance_df.append(new_row, ignore_index=True)
new_row = {'epoch': epochs[i], 'train_or_test': 'train', 'loss_or_mae': 'mae', 'value': train_mae[i]}
performance_df = performance_df.append(new_row, ignore_index=True)
new_row = {'epoch': epochs[i], 'train_or_test': 'test', 'loss_or_mae': 'mae', 'value': test_mae[i]}
performance_df = performance_df.append(new_row, ignore_index=True)
performance_df = performance_df.astype({'epoch': np.int64})
# Plot metrics on graph, fitted with exponential decay curves
lm = sns.lmplot(
x='epoch',
y='value',
data=performance_df,
row='loss_or_mae',
hue='train_or_test', # Note: If epoch = 1, this line causes an error. Make sure epoch >= 2
logx=True,
truncate=False,
sharey=False)
axes = lm.axes
max_mae = performance_df.loc[performance_df.loss_or_mae == 'mae']['value'].max()
min_mae = performance_df.loc[performance_df.loss_or_mae == 'mae']['value'].min()
axes[1, 0].set_ylim(min_mae - min_mae * 0.2, max_mae + max_mae * 0.2)
plt.show()
def generate_predictions(model, test_images, test_labels):
# Run validation data through model and print mean absolute error
raw_predictions = model.predict(test_images)
raw_predictions = raw_predictions.flatten()
# Build a dataframe storing data for each prediction made by the model
processed_predictions = pd.DataFrame(columns=['prediction', 'actual', 'abs_error', 'category'])
for i in range(len(raw_predictions)):
abs_error = abs(raw_predictions[i] - test_labels[i])
new_row = {
'prediction': raw_predictions[i],
'actual': test_labels[i],
'abs_error': abs_error,
'abs_error_squared': abs_error ** 2,
'category': category_of(test_labels[i])}
processed_predictions = processed_predictions.append(new_row, ignore_index=True)
print_progress('Processing Predictions', i + 1, len(raw_predictions))
return processed_predictions
def show_validation_results(predictions, show_plots=True, print_error=True):
print('\n\nRESULTS')
if print_error:
mae = predictions['abs_error'].mean()
print('\nMean Absolute Error: ' + str(round(float(mae), 2)) + ' knots')
rmse = predictions['abs_error_squared'].mean() ** 0.5
print('Root Mean Square Error: ' + str(round(float(rmse), 2)) + ' knots')
if show_plots:
# List of categories in order of ascending strength
categories = ['T. Depression', 'T. Storm', 'Category 1', 'Category 2', 'Category 3', 'Category 4', 'Category 5']
# Show bar graph of median absolute error for each category
plt.figure(figsize=(10, 5), dpi=300)
sns.barplot(
x='category',
y='abs_error',
data=predictions,
estimator=np.median,
order=categories)
sns.despine()
plt.xlabel("Hurricane Strength")
plt.ylabel("Absolute Error")
plt.title("Median Absolute Error in Neural Network's Predictions By Category")
plt.savefig('median_abs_error_by_category.png')
print('Graph of median absolute error by category saved as median_abs_error_by_category.png')
plt.clf()
# Show density plot of error for each category
for category in categories:
num_samples_tested = len(predictions.loc[predictions.category == category]['abs_error'])
sns.distplot(
predictions.loc[predictions.category == category]['abs_error'],
label=category + ' (' + str(num_samples_tested) + ' samples tested)',
hist=False,
kde_kws={"shade": True})
sns.despine()
plt.xlabel("Absolute Error")
plt.title("Distribution of Absolute Error By Category")
plt.legend()
plt.xlim(0, None)
plt.ylim(0, None)
plt.savefig('error_dist_by_category.png')
print('Graph of error distribution by category saved as error_dist_by_category.png')
def standardize_data(train_images, test_images):
train_images[train_images < 0] = 0
test_images[test_images < 0] = 0
st_dev = np.std(train_images)
mean = np.mean(train_images)
train_images = np.divide(np.subtract(train_images, mean), st_dev)
test_images = np.divide(np.subtract(test_images, mean), st_dev)
return train_images, test_images
def print_progress(action, progress, total):
percent_progress = round((progress / total) * 100, 1)
print('\r' + action + '... ' + str(percent_progress) + '% (' + str(progress) + ' of ' + str(total) + ')', end='')
def category_of(wind_speed):
if wind_speed <= 33:
return 'T. Depression'
elif wind_speed <= 64:
return 'T. Storm'
elif wind_speed <= 83:
return 'Category 1'
elif wind_speed <= 95:
return 'Category 2'
elif wind_speed <= 113:
return 'Category 3'
elif wind_speed <= 134:
return 'Category 4'
else:
return 'Category 5'
if __name__ == "__main__":
# Specify whether the script should use Keras's ImageDataGenerator to augment the training dataset. Assigning
# this variable to True will improve accuracy, but will also increase execution time.
AUGMENT = True
# Specify how many folds in the k-fold validation process. Can be any integer greater than or equal to 2. Larger
# integers will increase execution time.
NUM_FOLDS = 5
train_images, train_labels, test_images, test_labels = read_and_prepare_data('k_fold', NUM_FOLDS, augment=AUGMENT)
model = build_model()
predictions = pd.DataFrame(columns=['prediction', 'actual', 'abs_error', 'category'])
for i in range(NUM_FOLDS):
print('\n\nTraining Fold ' + str(i + 1) + ' of ' + str(NUM_FOLDS) + '\n')
model = train_model(model, train_images[i], train_labels[i], test_images[i], test_labels[i])
kth_fold_predictions = generate_predictions(model, test_images[i], test_labels[i])
predictions = predictions.append(kth_fold_predictions, ignore_index=True)
show_validation_results(predictions)
``` |
{
"source": "23doors/pulumi-gcp",
"score": 2
} |
#### File: pulumi_gcp/accesscontextmanager/service_perimeter_resource.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ServicePerimeterResource(pulumi.CustomResource):
perimeter_name: pulumi.Output[str]
resource: pulumi.Output[str]
def __init__(__self__, resource_name, opts=None, perimeter_name=None, resource=None, __props__=None, __name__=None, __opts__=None):
"""
Allows configuring a single GCP resource that should be inside of a service perimeter.
This resource is intended to be used in cases where it is not possible to compile a full list
of projects to include in an `accesscontextmanager.ServicePerimeter` resource,
to enable them to be added separately.
> **Note:** If this resource is used alongside an `accesscontextmanager.ServicePerimeter` resource,
the service perimeter resource must have a `lifecycle` block with `ignore_changes = [status[0].resources]` so
they don't fight over which resources should be in the policy.
To get more information about ServicePerimeterResource, see:
* [API documentation](https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters)
* How-to Guides
* [Service Perimeter Quickstart](https://cloud.google.com/vpc-service-controls/docs/quickstart)
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/access_context_manager_service_perimeter_resource.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if perimeter_name is None:
raise TypeError("Missing required property 'perimeter_name'")
__props__['perimeter_name'] = perimeter_name
if resource is None:
raise TypeError("Missing required property 'resource'")
__props__['resource'] = resource
super(ServicePerimeterResource, __self__).__init__(
'gcp:accesscontextmanager/servicePerimeterResource:ServicePerimeterResource',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, perimeter_name=None, resource=None):
"""
Get an existing ServicePerimeterResource resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/access_context_manager_service_perimeter_resource.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["perimeter_name"] = perimeter_name
__props__["resource"] = resource
return ServicePerimeterResource(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
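A short usage sketch for the resource defined above; the access policy number, perimeter name, and project number are placeholder values:

```python
import pulumi_gcp as gcp

# Attach a single project to an existing service perimeter. The perimeter name
# follows the accessPolicies/{policy_number}/servicePerimeters/{name} format and
# the resource is referenced by project number; both values here are placeholders.
perimeter_resource = gcp.accesscontextmanager.ServicePerimeterResource(
    "example-perimeter-resource",
    perimeter_name="accessPolicies/123456789/servicePerimeters/restrict_storage",
    resource="projects/987654321",
)
```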
#### File: pulumi_gcp/bigquery/dataset.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Dataset(pulumi.CustomResource):
accesses: pulumi.Output[list]
creation_time: pulumi.Output[float]
dataset_id: pulumi.Output[str]
default_encryption_configuration: pulumi.Output[dict]
default_partition_expiration_ms: pulumi.Output[float]
default_table_expiration_ms: pulumi.Output[float]
delete_contents_on_destroy: pulumi.Output[bool]
"""
If set to `true`, delete all the tables in the
dataset when destroying the resource; otherwise,
destroying the resource will fail if tables are present.
"""
description: pulumi.Output[str]
etag: pulumi.Output[str]
friendly_name: pulumi.Output[str]
labels: pulumi.Output[dict]
last_modified_time: pulumi.Output[float]
location: pulumi.Output[str]
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
def __init__(__self__, resource_name, opts=None, accesses=None, dataset_id=None, default_encryption_configuration=None, default_partition_expiration_ms=None, default_table_expiration_ms=None, delete_contents_on_destroy=None, description=None, friendly_name=None, labels=None, location=None, project=None, __props__=None, __name__=None, __opts__=None):
"""
Create a Dataset resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] delete_contents_on_destroy: If set to `true`, delete all the tables in the
dataset when destroying the resource; otherwise,
destroying the resource will fail if tables are present.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
The **accesses** object supports the following:
* `domain` (`pulumi.Input[str]`)
* `groupByEmail` (`pulumi.Input[str]`)
* `role` (`pulumi.Input[str]`)
* `specialGroup` (`pulumi.Input[str]`)
* `userByEmail` (`pulumi.Input[str]`)
* `view` (`pulumi.Input[dict]`)
* `dataset_id` (`pulumi.Input[str]`)
* `projectId` (`pulumi.Input[str]`)
* `table_id` (`pulumi.Input[str]`)
The **default_encryption_configuration** object supports the following:
* `kmsKeyName` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/bigquery_dataset.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['accesses'] = accesses
if dataset_id is None:
raise TypeError("Missing required property 'dataset_id'")
__props__['dataset_id'] = dataset_id
__props__['default_encryption_configuration'] = default_encryption_configuration
__props__['default_partition_expiration_ms'] = default_partition_expiration_ms
__props__['default_table_expiration_ms'] = default_table_expiration_ms
__props__['delete_contents_on_destroy'] = delete_contents_on_destroy
__props__['description'] = description
__props__['friendly_name'] = friendly_name
__props__['labels'] = labels
__props__['location'] = location
__props__['project'] = project
__props__['creation_time'] = None
__props__['etag'] = None
__props__['last_modified_time'] = None
__props__['self_link'] = None
super(Dataset, __self__).__init__(
'gcp:bigquery/dataset:Dataset',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, accesses=None, creation_time=None, dataset_id=None, default_encryption_configuration=None, default_partition_expiration_ms=None, default_table_expiration_ms=None, delete_contents_on_destroy=None, description=None, etag=None, friendly_name=None, labels=None, last_modified_time=None, location=None, project=None, self_link=None):
"""
Get an existing Dataset resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] delete_contents_on_destroy: If set to `true`, delete all the tables in the
dataset when destroying the resource; otherwise,
destroying the resource will fail if tables are present.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] self_link: The URI of the created resource.
The **accesses** object supports the following:
* `domain` (`pulumi.Input[str]`)
* `groupByEmail` (`pulumi.Input[str]`)
* `role` (`pulumi.Input[str]`)
* `specialGroup` (`pulumi.Input[str]`)
* `userByEmail` (`pulumi.Input[str]`)
* `view` (`pulumi.Input[dict]`)
* `dataset_id` (`pulumi.Input[str]`)
* `projectId` (`pulumi.Input[str]`)
* `table_id` (`pulumi.Input[str]`)
The **default_encryption_configuration** object supports the following:
* `kmsKeyName` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/bigquery_dataset.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["accesses"] = accesses
__props__["creation_time"] = creation_time
__props__["dataset_id"] = dataset_id
__props__["default_encryption_configuration"] = default_encryption_configuration
__props__["default_partition_expiration_ms"] = default_partition_expiration_ms
__props__["default_table_expiration_ms"] = default_table_expiration_ms
__props__["delete_contents_on_destroy"] = delete_contents_on_destroy
__props__["description"] = description
__props__["etag"] = etag
__props__["friendly_name"] = friendly_name
__props__["labels"] = labels
__props__["last_modified_time"] = last_modified_time
__props__["location"] = location
__props__["project"] = project
__props__["self_link"] = self_link
return Dataset(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
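For reference, a minimal sketch of creating a dataset with the constructor documented above; all identifier, location, and label values are illustrative:

```python
import pulumi_gcp as gcp

dataset = gcp.bigquery.Dataset(
    "example-dataset",
    dataset_id="example_dataset",          # letters, digits and underscores only
    friendly_name="Example dataset",
    description="Created from the Dataset resource documented above.",
    location="US",
    default_table_expiration_ms=3600000,   # tables expire after one hour
    labels={"env": "dev"},
)
```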
#### File: pulumi_gcp/compute/backend_service.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class BackendService(pulumi.CustomResource):
affinity_cookie_ttl_sec: pulumi.Output[float]
backends: pulumi.Output[list]
cdn_policy: pulumi.Output[dict]
circuit_breakers: pulumi.Output[dict]
connection_draining_timeout_sec: pulumi.Output[float]
consistent_hash: pulumi.Output[dict]
creation_timestamp: pulumi.Output[str]
custom_request_headers: pulumi.Output[list]
description: pulumi.Output[str]
enable_cdn: pulumi.Output[bool]
fingerprint: pulumi.Output[str]
health_checks: pulumi.Output[str]
iap: pulumi.Output[dict]
load_balancing_scheme: pulumi.Output[str]
locality_lb_policy: pulumi.Output[str]
log_config: pulumi.Output[dict]
name: pulumi.Output[str]
outlier_detection: pulumi.Output[dict]
port_name: pulumi.Output[str]
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
protocol: pulumi.Output[str]
security_policy: pulumi.Output[str]
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
session_affinity: pulumi.Output[str]
timeout_sec: pulumi.Output[float]
def __init__(__self__, resource_name, opts=None, affinity_cookie_ttl_sec=None, backends=None, cdn_policy=None, circuit_breakers=None, connection_draining_timeout_sec=None, consistent_hash=None, custom_request_headers=None, description=None, enable_cdn=None, health_checks=None, iap=None, load_balancing_scheme=None, locality_lb_policy=None, log_config=None, name=None, outlier_detection=None, port_name=None, project=None, protocol=None, security_policy=None, session_affinity=None, timeout_sec=None, __props__=None, __name__=None, __opts__=None):
"""
Create a BackendService resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
The **backends** object supports the following:
* `balancingMode` (`pulumi.Input[str]`)
* `capacityScaler` (`pulumi.Input[float]`)
* `description` (`pulumi.Input[str]`)
* `group` (`pulumi.Input[str]`)
* `maxConnections` (`pulumi.Input[float]`)
* `maxConnectionsPerEndpoint` (`pulumi.Input[float]`)
* `maxConnectionsPerInstance` (`pulumi.Input[float]`)
* `maxRate` (`pulumi.Input[float]`)
* `maxRatePerEndpoint` (`pulumi.Input[float]`)
* `maxRatePerInstance` (`pulumi.Input[float]`)
* `maxUtilization` (`pulumi.Input[float]`)
The **cdn_policy** object supports the following:
* `cacheKeyPolicy` (`pulumi.Input[dict]`)
* `includeHost` (`pulumi.Input[bool]`)
* `includeProtocol` (`pulumi.Input[bool]`)
* `includeQueryString` (`pulumi.Input[bool]`)
* `queryStringBlacklists` (`pulumi.Input[list]`)
* `queryStringWhitelists` (`pulumi.Input[list]`)
* `signedUrlCacheMaxAgeSec` (`pulumi.Input[float]`)
The **circuit_breakers** object supports the following:
* `connectTimeout` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `maxConnections` (`pulumi.Input[float]`)
* `maxPendingRequests` (`pulumi.Input[float]`)
* `maxRequests` (`pulumi.Input[float]`)
* `maxRequestsPerConnection` (`pulumi.Input[float]`)
* `maxRetries` (`pulumi.Input[float]`)
The **consistent_hash** object supports the following:
* `httpCookie` (`pulumi.Input[dict]`)
* `name` (`pulumi.Input[str]`)
* `path` (`pulumi.Input[str]`)
* `ttl` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `httpHeaderName` (`pulumi.Input[str]`)
* `minimumRingSize` (`pulumi.Input[float]`)
The **iap** object supports the following:
* `oauth2ClientId` (`pulumi.Input[str]`)
* `oauth2ClientSecret` (`pulumi.Input[str]`)
* `oauth2ClientSecretSha256` (`pulumi.Input[str]`)
The **log_config** object supports the following:
* `enable` (`pulumi.Input[bool]`)
* `sampleRate` (`pulumi.Input[float]`)
The **outlier_detection** object supports the following:
* `baseEjectionTime` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `consecutiveErrors` (`pulumi.Input[float]`)
* `consecutiveGatewayFailure` (`pulumi.Input[float]`)
* `enforcingConsecutiveErrors` (`pulumi.Input[float]`)
* `enforcingConsecutiveGatewayFailure` (`pulumi.Input[float]`)
* `enforcingSuccessRate` (`pulumi.Input[float]`)
* `interval` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `maxEjectionPercent` (`pulumi.Input[float]`)
* `successRateMinimumHosts` (`pulumi.Input[float]`)
* `successRateRequestVolume` (`pulumi.Input[float]`)
* `successRateStdevFactor` (`pulumi.Input[float]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_backend_service.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['affinity_cookie_ttl_sec'] = affinity_cookie_ttl_sec
__props__['backends'] = backends
__props__['cdn_policy'] = cdn_policy
__props__['circuit_breakers'] = circuit_breakers
__props__['connection_draining_timeout_sec'] = connection_draining_timeout_sec
__props__['consistent_hash'] = consistent_hash
__props__['custom_request_headers'] = custom_request_headers
__props__['description'] = description
__props__['enable_cdn'] = enable_cdn
if health_checks is None:
raise TypeError("Missing required property 'health_checks'")
__props__['health_checks'] = health_checks
__props__['iap'] = iap
__props__['load_balancing_scheme'] = load_balancing_scheme
__props__['locality_lb_policy'] = locality_lb_policy
__props__['log_config'] = log_config
__props__['name'] = name
__props__['outlier_detection'] = outlier_detection
__props__['port_name'] = port_name
__props__['project'] = project
__props__['protocol'] = protocol
__props__['security_policy'] = security_policy
__props__['session_affinity'] = session_affinity
__props__['timeout_sec'] = timeout_sec
__props__['creation_timestamp'] = None
__props__['fingerprint'] = None
__props__['self_link'] = None
super(BackendService, __self__).__init__(
'gcp:compute/backendService:BackendService',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, affinity_cookie_ttl_sec=None, backends=None, cdn_policy=None, circuit_breakers=None, connection_draining_timeout_sec=None, consistent_hash=None, creation_timestamp=None, custom_request_headers=None, description=None, enable_cdn=None, fingerprint=None, health_checks=None, iap=None, load_balancing_scheme=None, locality_lb_policy=None, log_config=None, name=None, outlier_detection=None, port_name=None, project=None, protocol=None, security_policy=None, self_link=None, session_affinity=None, timeout_sec=None):
"""
Get an existing BackendService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] self_link: The URI of the created resource.
The **backends** object supports the following:
* `balancingMode` (`pulumi.Input[str]`)
* `capacityScaler` (`pulumi.Input[float]`)
* `description` (`pulumi.Input[str]`)
* `group` (`pulumi.Input[str]`)
* `maxConnections` (`pulumi.Input[float]`)
* `maxConnectionsPerEndpoint` (`pulumi.Input[float]`)
* `maxConnectionsPerInstance` (`pulumi.Input[float]`)
* `maxRate` (`pulumi.Input[float]`)
* `maxRatePerEndpoint` (`pulumi.Input[float]`)
* `maxRatePerInstance` (`pulumi.Input[float]`)
* `maxUtilization` (`pulumi.Input[float]`)
The **cdn_policy** object supports the following:
* `cacheKeyPolicy` (`pulumi.Input[dict]`)
* `includeHost` (`pulumi.Input[bool]`)
* `includeProtocol` (`pulumi.Input[bool]`)
* `includeQueryString` (`pulumi.Input[bool]`)
* `queryStringBlacklists` (`pulumi.Input[list]`)
* `queryStringWhitelists` (`pulumi.Input[list]`)
* `signedUrlCacheMaxAgeSec` (`pulumi.Input[float]`)
The **circuit_breakers** object supports the following:
* `connectTimeout` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `maxConnections` (`pulumi.Input[float]`)
* `maxPendingRequests` (`pulumi.Input[float]`)
* `maxRequests` (`pulumi.Input[float]`)
* `maxRequestsPerConnection` (`pulumi.Input[float]`)
* `maxRetries` (`pulumi.Input[float]`)
The **consistent_hash** object supports the following:
* `httpCookie` (`pulumi.Input[dict]`)
* `name` (`pulumi.Input[str]`)
* `path` (`pulumi.Input[str]`)
* `ttl` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `httpHeaderName` (`pulumi.Input[str]`)
* `minimumRingSize` (`pulumi.Input[float]`)
The **iap** object supports the following:
* `oauth2ClientId` (`pulumi.Input[str]`)
* `oauth2ClientSecret` (`pulumi.Input[str]`)
* `oauth2ClientSecretSha256` (`pulumi.Input[str]`)
The **log_config** object supports the following:
* `enable` (`pulumi.Input[bool]`)
* `sampleRate` (`pulumi.Input[float]`)
The **outlier_detection** object supports the following:
* `baseEjectionTime` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `consecutiveErrors` (`pulumi.Input[float]`)
* `consecutiveGatewayFailure` (`pulumi.Input[float]`)
* `enforcingConsecutiveErrors` (`pulumi.Input[float]`)
* `enforcingConsecutiveGatewayFailure` (`pulumi.Input[float]`)
* `enforcingSuccessRate` (`pulumi.Input[float]`)
* `interval` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `maxEjectionPercent` (`pulumi.Input[float]`)
* `successRateMinimumHosts` (`pulumi.Input[float]`)
* `successRateRequestVolume` (`pulumi.Input[float]`)
* `successRateStdevFactor` (`pulumi.Input[float]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_backend_service.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["affinity_cookie_ttl_sec"] = affinity_cookie_ttl_sec
__props__["backends"] = backends
__props__["cdn_policy"] = cdn_policy
__props__["circuit_breakers"] = circuit_breakers
__props__["connection_draining_timeout_sec"] = connection_draining_timeout_sec
__props__["consistent_hash"] = consistent_hash
__props__["creation_timestamp"] = creation_timestamp
__props__["custom_request_headers"] = custom_request_headers
__props__["description"] = description
__props__["enable_cdn"] = enable_cdn
__props__["fingerprint"] = fingerprint
__props__["health_checks"] = health_checks
__props__["iap"] = iap
__props__["load_balancing_scheme"] = load_balancing_scheme
__props__["locality_lb_policy"] = locality_lb_policy
__props__["log_config"] = log_config
__props__["name"] = name
__props__["outlier_detection"] = outlier_detection
__props__["port_name"] = port_name
__props__["project"] = project
__props__["protocol"] = protocol
__props__["security_policy"] = security_policy
__props__["self_link"] = self_link
__props__["session_affinity"] = session_affinity
__props__["timeout_sec"] = timeout_sec
return BackendService(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
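A short, hedged usage sketch for the `BackendService` resource above. Because the constructor raises when `health_checks` is missing, the sketch first creates a legacy HTTP health check (assuming `gcp.compute.HttpHealthCheck` is available in this SDK) and wires its `self_link` into the backend service; all resource names and settings are placeholders.
```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical example: a backend service requires at least one health check.
default_check = gcp.compute.HttpHealthCheck(
    "default-check",
    request_path="/",
    check_interval_sec=5,
    timeout_sec=5,
)

backend_service = gcp.compute.BackendService(
    "example-backend-service",
    health_checks=default_check.self_link,  # referenced by self_link
    protocol="HTTP",
    timeout_sec=10,
    enable_cdn=False,
)

pulumi.export("backend_service_self_link", backend_service.self_link)
```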
#### File: pulumi_gcp/compute/get_backend_bucket.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetBackendBucketResult:
"""
A collection of values returned by getBackendBucket.
"""
def __init__(__self__, bucket_name=None, cdn_policies=None, creation_timestamp=None, description=None, enable_cdn=None, name=None, project=None, self_link=None, id=None):
if bucket_name and not isinstance(bucket_name, str):
raise TypeError("Expected argument 'bucket_name' to be a str")
__self__.bucket_name = bucket_name
if cdn_policies and not isinstance(cdn_policies, list):
raise TypeError("Expected argument 'cdn_policies' to be a list")
__self__.cdn_policies = cdn_policies
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
__self__.creation_timestamp = creation_timestamp
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
__self__.description = description
if enable_cdn and not isinstance(enable_cdn, bool):
raise TypeError("Expected argument 'enable_cdn' to be a bool")
__self__.enable_cdn = enable_cdn
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
__self__.project = project
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
__self__.self_link = self_link
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetBackendBucketResult(GetBackendBucketResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBackendBucketResult(
bucket_name=self.bucket_name,
cdn_policies=self.cdn_policies,
creation_timestamp=self.creation_timestamp,
description=self.description,
enable_cdn=self.enable_cdn,
name=self.name,
project=self.project,
self_link=self.self_link,
id=self.id)
def get_backend_bucket(name=None, project=None, opts=None):
"""
Use this data source to access information about an existing Compute Backend Bucket.
"""
__args__ = dict()
__args__['name'] = name
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:compute/getBackendBucket:getBackendBucket', __args__, opts=opts).value
return AwaitableGetBackendBucketResult(
bucket_name=__ret__.get('bucketName'),
cdn_policies=__ret__.get('cdnPolicies'),
creation_timestamp=__ret__.get('creationTimestamp'),
description=__ret__.get('description'),
enable_cdn=__ret__.get('enableCdn'),
name=__ret__.get('name'),
project=__ret__.get('project'),
self_link=__ret__.get('selfLink'),
id=__ret__.get('id'))
```
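A minimal lookup sketch for `get_backend_bucket`; the bucket name is a placeholder and the call assumes a backend bucket by that name already exists in the configured project.
```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical lookup: read an existing backend bucket by name.
bucket_info = gcp.compute.get_backend_bucket(name="my-backend-bucket")

# The result object exposes plain attributes such as self_link and enable_cdn.
pulumi.export("backend_bucket_self_link", bucket_info.self_link)
pulumi.export("backend_bucket_cdn_enabled", bucket_info.enable_cdn)
```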
#### File: pulumi_gcp/compute/get_certificate.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetCertificateResult:
"""
A collection of values returned by getCertificate.
"""
def __init__(__self__, certificate=None, certificate_id=None, creation_timestamp=None, description=None, name=None, name_prefix=None, private_key=None, project=None, self_link=None, id=None):
if certificate and not isinstance(certificate, str):
raise TypeError("Expected argument 'certificate' to be a str")
__self__.certificate = certificate
if certificate_id and not isinstance(certificate_id, float):
raise TypeError("Expected argument 'certificate_id' to be a float")
__self__.certificate_id = certificate_id
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
__self__.creation_timestamp = creation_timestamp
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
__self__.description = description
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if name_prefix and not isinstance(name_prefix, str):
raise TypeError("Expected argument 'name_prefix' to be a str")
__self__.name_prefix = name_prefix
if private_key and not isinstance(private_key, str):
raise TypeError("Expected argument 'private_key' to be a str")
__self__.private_key = private_key
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
__self__.project = project
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
__self__.self_link = self_link
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
certificate=self.certificate,
certificate_id=self.certificate_id,
creation_timestamp=self.creation_timestamp,
description=self.description,
name=self.name,
name_prefix=self.name_prefix,
private_key=self.private_key,
project=self.project,
self_link=self.self_link,
id=self.id)
def get_certificate(name=None, project=None, opts=None):
"""
Get info about a Google Compute SSL Certificate from its name.
:param str name: The name of the certificate.
:param str project: The project in which the resource belongs. If it
is not provided, the provider project is used.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/compute_ssl_certificate.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:compute/getCertificate:getCertificate', __args__, opts=opts).value
return AwaitableGetCertificateResult(
certificate=__ret__.get('certificate'),
certificate_id=__ret__.get('certificateId'),
creation_timestamp=__ret__.get('creationTimestamp'),
description=__ret__.get('description'),
name=__ret__.get('name'),
name_prefix=__ret__.get('namePrefix'),
private_key=__ret__.get('privateKey'),
project=__ret__.get('project'),
self_link=__ret__.get('selfLink'),
id=__ret__.get('id'))
```
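A minimal lookup sketch for `get_certificate`; the certificate name is a placeholder for an SSL certificate that already exists in the configured project.
```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical lookup: fetch an existing SSL certificate by name.
cert_info = gcp.compute.get_certificate(name="my-ssl-certificate")

pulumi.export("certificate_self_link", cert_info.self_link)
pulumi.export("certificate_id", cert_info.certificate_id)
```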
#### File: pulumi_gcp/compute/get_network_endpoint_group.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetNetworkEndpointGroupResult:
"""
A collection of values returned by getNetworkEndpointGroup.
"""
def __init__(__self__, default_port=None, description=None, name=None, network=None, network_endpoint_type=None, project=None, self_link=None, size=None, subnetwork=None, zone=None, id=None):
if default_port and not isinstance(default_port, float):
raise TypeError("Expected argument 'default_port' to be a float")
__self__.default_port = default_port
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
__self__.description = description
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if network and not isinstance(network, str):
raise TypeError("Expected argument 'network' to be a str")
__self__.network = network
if network_endpoint_type and not isinstance(network_endpoint_type, str):
raise TypeError("Expected argument 'network_endpoint_type' to be a str")
__self__.network_endpoint_type = network_endpoint_type
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
__self__.project = project
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
__self__.self_link = self_link
if size and not isinstance(size, float):
raise TypeError("Expected argument 'size' to be a float")
__self__.size = size
if subnetwork and not isinstance(subnetwork, str):
raise TypeError("Expected argument 'subnetwork' to be a str")
__self__.subnetwork = subnetwork
if zone and not isinstance(zone, str):
raise TypeError("Expected argument 'zone' to be a str")
__self__.zone = zone
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetNetworkEndpointGroupResult(GetNetworkEndpointGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkEndpointGroupResult(
default_port=self.default_port,
description=self.description,
name=self.name,
network=self.network,
network_endpoint_type=self.network_endpoint_type,
project=self.project,
self_link=self.self_link,
size=self.size,
subnetwork=self.subnetwork,
zone=self.zone,
id=self.id)
def get_network_endpoint_group(name=None, self_link=None, zone=None, opts=None):
"""
Use this data source to access information about an existing Network Endpoint Group.
"""
__args__ = dict()
__args__['name'] = name
__args__['selfLink'] = self_link
__args__['zone'] = zone
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:compute/getNetworkEndpointGroup:getNetworkEndpointGroup', __args__, opts=opts).value
return AwaitableGetNetworkEndpointGroupResult(
default_port=__ret__.get('defaultPort'),
description=__ret__.get('description'),
name=__ret__.get('name'),
network=__ret__.get('network'),
network_endpoint_type=__ret__.get('networkEndpointType'),
project=__ret__.get('project'),
self_link=__ret__.get('selfLink'),
size=__ret__.get('size'),
subnetwork=__ret__.get('subnetwork'),
zone=__ret__.get('zone'),
id=__ret__.get('id'))
```
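A minimal lookup sketch for `get_network_endpoint_group`. Network endpoint groups are zonal, so the sketch passes both a name and a zone (alternatively, a full `self_link` could be supplied); both values are placeholders.
```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical lookup: read an existing zonal network endpoint group.
neg_info = gcp.compute.get_network_endpoint_group(
    name="my-endpoint-group",
    zone="us-central1-a",
)

pulumi.export("neg_size", neg_info.size)
pulumi.export("neg_network", neg_info.network)
```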
#### File: pulumi_gcp/compute/get_subnetwork.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetSubnetworkResult:
"""
A collection of values returned by getSubnetwork.
"""
def __init__(__self__, description=None, gateway_address=None, ip_cidr_range=None, name=None, network=None, private_ip_google_access=None, project=None, region=None, secondary_ip_ranges=None, self_link=None, id=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
__self__.description = description
"""
Description of this subnetwork.
"""
if gateway_address and not isinstance(gateway_address, str):
raise TypeError("Expected argument 'gateway_address' to be a str")
__self__.gateway_address = gateway_address
"""
The IP address of the gateway.
"""
if ip_cidr_range and not isinstance(ip_cidr_range, str):
raise TypeError("Expected argument 'ip_cidr_range' to be a str")
__self__.ip_cidr_range = ip_cidr_range
"""
The range of IP addresses belonging to this subnetwork,
represented as a CIDR block.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if network and not isinstance(network, str):
raise TypeError("Expected argument 'network' to be a str")
__self__.network = network
"""
The network name or resource link to the parent
network of this subnetwork.
"""
if private_ip_google_access and not isinstance(private_ip_google_access, bool):
raise TypeError("Expected argument 'private_ip_google_access' to be a bool")
__self__.private_ip_google_access = private_ip_google_access
"""
Whether the VMs in this subnet
can access Google services without assigned external IP
addresses.
"""
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
__self__.project = project
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
__self__.region = region
if secondary_ip_ranges and not isinstance(secondary_ip_ranges, list):
raise TypeError("Expected argument 'secondary_ip_ranges' to be a list")
__self__.secondary_ip_ranges = secondary_ip_ranges
"""
An array of configurations for secondary IP ranges for
VM instances contained in this subnetwork. Structure is documented below.
"""
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
__self__.self_link = self_link
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetSubnetworkResult(GetSubnetworkResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSubnetworkResult(
description=self.description,
gateway_address=self.gateway_address,
ip_cidr_range=self.ip_cidr_range,
name=self.name,
network=self.network,
private_ip_google_access=self.private_ip_google_access,
project=self.project,
region=self.region,
secondary_ip_ranges=self.secondary_ip_ranges,
self_link=self.self_link,
id=self.id)
def get_subnetwork(name=None, project=None, region=None, self_link=None, opts=None):
"""
Get a subnetwork within GCE from its name and region.
:param str name: The name of the subnetwork. One of `name` or `self_link`
must be specified.
:param str project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param str region: The region this subnetwork has been created in. If
unspecified, this defaults to the region configured in the provider.
:param str self_link: The self link of the subnetwork. If `self_link` is
specified, `name`, `project`, and `region` are ignored.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/compute_subnetwork.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
__args__['project'] = project
__args__['region'] = region
__args__['selfLink'] = self_link
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:compute/getSubnetwork:getSubnetwork', __args__, opts=opts).value
return AwaitableGetSubnetworkResult(
description=__ret__.get('description'),
gateway_address=__ret__.get('gatewayAddress'),
ip_cidr_range=__ret__.get('ipCidrRange'),
name=__ret__.get('name'),
network=__ret__.get('network'),
private_ip_google_access=__ret__.get('privateIpGoogleAccess'),
project=__ret__.get('project'),
region=__ret__.get('region'),
secondary_ip_ranges=__ret__.get('secondaryIpRanges'),
self_link=__ret__.get('selfLink'),
id=__ret__.get('id'))
```
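A minimal lookup sketch for `get_subnetwork`, following the docstring above: either `name` plus `region`, or a `self_link`, identifies the subnetwork. The name and region are placeholders.
```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical lookup: read an existing subnetwork by name and region.
subnet_info = gcp.compute.get_subnetwork(
    name="default-us-east1",
    region="us-east1",
)

pulumi.export("subnet_cidr", subnet_info.ip_cidr_range)
pulumi.export("subnet_gateway", subnet_info.gateway_address)
```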
#### File: pulumi_gcp/compute/instance_group.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class InstanceGroup(pulumi.CustomResource):
description: pulumi.Output[str]
"""
An optional textual description of the instance
group.
"""
instances: pulumi.Output[list]
"""
List of instances in the group. They should be given
as self_link URLs. When adding instances they must all be in the same
network and zone as the instance group.
"""
name: pulumi.Output[str]
"""
The name of the instance group. Must be 1-63
characters long and comply with
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters
include lowercase letters, numbers, and hyphens.
"""
named_ports: pulumi.Output[list]
"""
The named port configuration. See the section below
for details on configuration.
* `name` (`str`) - The name which this port will be mapped to. Must be 1-63
characters long and comply with
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters
include lowercase letters, numbers, and hyphens.
* `port` (`float`)
"""
network: pulumi.Output[str]
"""
The URL of the network the instance group is in. If
this is different from the network the instances are in, creation
fails. Defaults to the network the instances are in (if neither
`network` nor `instances` is specified, this field will be blank).
"""
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
size: pulumi.Output[float]
"""
The number of instances in the group.
"""
zone: pulumi.Output[str]
"""
The zone that this instance group should be created in.
"""
def __init__(__self__, resource_name, opts=None, description=None, instances=None, name=None, named_ports=None, network=None, project=None, zone=None, __props__=None, __name__=None, __opts__=None):
"""
Creates a group of dissimilar Compute Engine virtual machine instances.
For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/#unmanaged_instance_groups)
and [API](https://cloud.google.com/compute/docs/reference/latest/instanceGroups)
> Recreating an instance group that's in use by another resource will give a
`resourceInUseByAnotherResource` error.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: An optional textual description of the instance
group.
:param pulumi.Input[list] instances: List of instances in the group. They should be given
as self_link URLs. When adding instances they must all be in the same
network and zone as the instance group.
:param pulumi.Input[str] name: The name of the instance group. Must be 1-63
characters long and comply with
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters
include lowercase letters, numbers, and hyphens.
:param pulumi.Input[list] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] network: The URL of the network the instance group is in. If
this is different from the network the instances are in, creation
fails. Defaults to the network the instances are in (if neither
`network` nor `instances` is specified, this field will be blank).
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] zone: The zone that this instance group should be created in.
The **named_ports** object supports the following:
* `name` (`pulumi.Input[str]`) - The name which this port will be mapped to. Must be 1-63
characters long and comply with
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters
include lowercase letters, numbers, and hyphens.
* `port` (`pulumi.Input[float]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_instance_group.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['instances'] = instances
__props__['name'] = name
__props__['named_ports'] = named_ports
__props__['network'] = network
__props__['project'] = project
__props__['zone'] = zone
__props__['self_link'] = None
__props__['size'] = None
super(InstanceGroup, __self__).__init__(
'gcp:compute/instanceGroup:InstanceGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, description=None, instances=None, name=None, named_ports=None, network=None, project=None, self_link=None, size=None, zone=None):
"""
Get an existing InstanceGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: An optional textual description of the instance
group.
:param pulumi.Input[list] instances: List of instances in the group. They should be given
as self_link URLs. When adding instances they must all be in the same
network and zone as the instance group.
:param pulumi.Input[str] name: The name of the instance group. Must be 1-63
characters long and comply with
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters
include lowercase letters, numbers, and hyphens.
:param pulumi.Input[list] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] network: The URL of the network the instance group is in. If
this is different from the network the instances are in, creation
fails. Defaults to the network the instances are in (if neither
`network` nor `instances` is specified, this field will be blank).
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] self_link: The URI of the created resource.
:param pulumi.Input[float] size: The number of instances in the group.
:param pulumi.Input[str] zone: The zone that this instance group should be created in.
The **named_ports** object supports the following:
* `name` (`pulumi.Input[str]`) - The name which this port will be mapped to. Must be 1-63
characters long and comply with
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters
include lowercase letters, numbers, and hyphens.
* `port` (`pulumi.Input[float]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_instance_group.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["description"] = description
__props__["instances"] = instances
__props__["name"] = name
__props__["named_ports"] = named_ports
__props__["network"] = network
__props__["project"] = project
__props__["self_link"] = self_link
__props__["size"] = size
__props__["zone"] = zone
return InstanceGroup(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
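A short usage sketch for the `InstanceGroup` resource above: an unmanaged group with a single named port. The zone, port name, and port number are placeholders; instance `self_link` URLs could be attached later through the `instances` argument.
```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical example: an unmanaged instance group with one named port.
web_group = gcp.compute.InstanceGroup(
    "web-group",
    description="Example unmanaged instance group",
    zone="us-central1-a",
    named_ports=[{
        "name": "http",
        "port": 8080,
    }],
)

pulumi.export("instance_group_self_link", web_group.self_link)
```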
#### File: pulumi_gcp/compute/instance_template.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class InstanceTemplate(pulumi.CustomResource):
can_ip_forward: pulumi.Output[bool]
"""
Whether to allow sending and receiving of
packets with non-matching source or destination IPs. This defaults to false.
"""
description: pulumi.Output[str]
"""
A brief description of this resource.
"""
disks: pulumi.Output[list]
"""
Disks to attach to instances created from this template.
This can be specified multiple times for multiple disks. Structure is
documented below.
* `autoDelete` (`bool`)
* `boot` (`bool`)
* `device_name` (`str`)
* `disk_encryption_key` (`dict`)
* `kmsKeySelfLink` (`str`)
* `diskName` (`str`)
* `disk_size_gb` (`float`)
* `diskType` (`str`)
* `interface` (`str`)
* `labels` (`dict`) - A set of key/value label pairs to assign to instances
created from this template.
* `mode` (`str`)
* `source` (`str`)
* `sourceImage` (`str`)
* `type` (`str`)
"""
enable_display: pulumi.Output[bool]
"""
Enable [Virtual Displays](https://cloud.google.com/compute/docs/instances/enable-instance-virtual-display#verify_display_driver) on this instance.
**Note**: `allow_stopping_for_update` must be set to true in order to update this field.
"""
guest_accelerators: pulumi.Output[list]
"""
List of the type and count of accelerator cards attached to the instance. Structure documented below.
* `count` (`float`)
* `type` (`str`)
"""
instance_description: pulumi.Output[str]
"""
A brief description to use for instances
created from this template.
"""
labels: pulumi.Output[dict]
"""
A set of key/value label pairs to assign to instances
created from this template.
"""
machine_type: pulumi.Output[str]
"""
The machine type to create.
"""
metadata: pulumi.Output[dict]
"""
Metadata key/value pairs to make available from
within instances created from this template.
"""
metadata_fingerprint: pulumi.Output[str]
"""
The unique fingerprint of the metadata.
"""
metadata_startup_script: pulumi.Output[str]
"""
An alternative to using the
startup-script metadata key, mostly to match the compute_instance resource.
This replaces the startup-script metadata key on the created instance and
thus the two mechanisms are not allowed to be used simultaneously.
"""
min_cpu_platform: pulumi.Output[str]
"""
Specifies a minimum CPU platform. Applicable values are the friendly names of CPU platforms, such as
`Intel Haswell` or `Intel Skylake`. See the complete list [here](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform).
"""
name: pulumi.Output[str]
"""
The name of the instance template. If you leave
this blank, this provider will auto-generate a unique name.
"""
name_prefix: pulumi.Output[str]
"""
Creates a unique name beginning with the specified
prefix. Conflicts with `name`.
"""
network_interfaces: pulumi.Output[list]
"""
Networks to attach to instances created from
this template. This can be specified multiple times for multiple networks.
Structure is documented below.
* `accessConfigs` (`list`)
* `natIp` (`str`)
* `network_tier` (`str`)
* `aliasIpRanges` (`list`)
* `ip_cidr_range` (`str`)
* `subnetworkRangeName` (`str`)
* `network` (`str`)
* `networkIp` (`str`)
* `subnetwork` (`str`)
* `subnetworkProject` (`str`)
"""
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
region: pulumi.Output[str]
"""
An instance template is a global resource that is not
bound to a zone or a region. However, you can still specify some regional
resources in an instance template, which restricts the template to the
region where that resource resides. For example, a custom `subnetwork`
resource is tied to a specific region. Defaults to the region of the
Provider if no value is given.
"""
scheduling: pulumi.Output[dict]
"""
The scheduling strategy to use. More details about
this configuration option are detailed below.
* `automaticRestart` (`bool`)
* `nodeAffinities` (`list`)
* `key` (`str`)
* `operator` (`str`)
* `values` (`list`)
* `onHostMaintenance` (`str`)
* `preemptible` (`bool`)
"""
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
service_account: pulumi.Output[dict]
"""
Service account to attach to the instance. Structure is documented below.
* `email` (`str`)
* `scopes` (`list`)
"""
shielded_instance_config: pulumi.Output[dict]
"""
Enable [Shielded VM](https://cloud.google.com/security/shielded-cloud/shielded-vm) on this instance. Shielded VM provides verifiable integrity to protect against malware and rootkits. Defaults to disabled. Structure is documented below.
**Note**: `shielded_instance_config` can only be used with boot images with shielded vm support. See the complete list [here](https://cloud.google.com/compute/docs/images#shielded-images).
* `enableIntegrityMonitoring` (`bool`)
* `enableSecureBoot` (`bool`)
* `enableVtpm` (`bool`)
"""
tags: pulumi.Output[list]
"""
Tags to attach to the instance.
"""
tags_fingerprint: pulumi.Output[str]
"""
The unique fingerprint of the tags.
"""
def __init__(__self__, resource_name, opts=None, can_ip_forward=None, description=None, disks=None, enable_display=None, guest_accelerators=None, instance_description=None, labels=None, machine_type=None, metadata=None, metadata_startup_script=None, min_cpu_platform=None, name=None, name_prefix=None, network_interfaces=None, project=None, region=None, scheduling=None, service_account=None, shielded_instance_config=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a VM instance template resource within GCE. For more information see
[the official documentation](https://cloud.google.com/compute/docs/instance-templates)
and
[API](https://cloud.google.com/compute/docs/reference/latest/instanceTemplates).
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] can_ip_forward: Whether to allow sending and receiving of
packets with non-matching source or destination IPs. This defaults to false.
:param pulumi.Input[str] description: A brief description of this resource.
:param pulumi.Input[list] disks: Disks to attach to instances created from this template.
This can be specified multiple times for multiple disks. Structure is
documented below.
:param pulumi.Input[bool] enable_display: Enable [Virtual Displays](https://cloud.google.com/compute/docs/instances/enable-instance-virtual-display#verify_display_driver) on this instance.
**Note**: `allow_stopping_for_update` must be set to true in order to update this field.
:param pulumi.Input[list] guest_accelerators: List of the type and count of accelerator cards attached to the instance. Structure documented below.
:param pulumi.Input[str] instance_description: A brief description to use for instances
created from this template.
:param pulumi.Input[dict] labels: A set of key/value label pairs to assign to instances
created from this template.
:param pulumi.Input[str] machine_type: The machine type to create.
:param pulumi.Input[dict] metadata: Metadata key/value pairs to make available from
within instances created from this template.
:param pulumi.Input[str] metadata_startup_script: An alternative to using the
startup-script metadata key, mostly to match the compute_instance resource.
This replaces the startup-script metadata key on the created instance and
thus the two mechanisms are not allowed to be used simultaneously.
:param pulumi.Input[str] min_cpu_platform: Specifies a minimum CPU platform. Applicable values are the friendly names of CPU platforms, such as
`Intel Haswell` or `Intel Skylake`. See the complete list [here](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform).
:param pulumi.Input[str] name: The name of the instance template. If you leave
this blank, this provider will auto-generate a unique name.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
prefix. Conflicts with `name`.
:param pulumi.Input[list] network_interfaces: Networks to attach to instances created from
this template. This can be specified multiple times for multiple networks.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: An instance template is a global resource that is not
bound to a zone or a region. However, you can still specify some regional
resources in an instance template, which restricts the template to the
region where that resource resides. For example, a custom `subnetwork`
resource is tied to a specific region. Defaults to the region of the
Provider if no value is given.
:param pulumi.Input[dict] scheduling: The scheduling strategy to use. More details about
this configuration option are detailed below.
:param pulumi.Input[dict] service_account: Service account to attach to the instance. Structure is documented below.
:param pulumi.Input[dict] shielded_instance_config: Enable [Shielded VM](https://cloud.google.com/security/shielded-cloud/shielded-vm) on this instance. Shielded VM provides verifiable integrity to protect against malware and rootkits. Defaults to disabled. Structure is documented below.
**Note**: `shielded_instance_config` can only be used with boot images with shielded vm support. See the complete list [here](https://cloud.google.com/compute/docs/images#shielded-images).
:param pulumi.Input[list] tags: Tags to attach to the instance.
The **disks** object supports the following:
* `autoDelete` (`pulumi.Input[bool]`)
* `boot` (`pulumi.Input[bool]`)
* `device_name` (`pulumi.Input[str]`)
* `disk_encryption_key` (`pulumi.Input[dict]`)
* `kmsKeySelfLink` (`pulumi.Input[str]`)
* `diskName` (`pulumi.Input[str]`)
* `disk_size_gb` (`pulumi.Input[float]`)
* `diskType` (`pulumi.Input[str]`)
* `interface` (`pulumi.Input[str]`)
* `labels` (`pulumi.Input[dict]`) - A set of key/value label pairs to assign to instances
created from this template.
* `mode` (`pulumi.Input[str]`)
* `source` (`pulumi.Input[str]`)
* `sourceImage` (`pulumi.Input[str]`)
* `type` (`pulumi.Input[str]`)
The **guest_accelerators** object supports the following:
* `count` (`pulumi.Input[float]`)
* `type` (`pulumi.Input[str]`)
The **network_interfaces** object supports the following:
* `accessConfigs` (`pulumi.Input[list]`)
* `natIp` (`pulumi.Input[str]`)
* `network_tier` (`pulumi.Input[str]`)
* `aliasIpRanges` (`pulumi.Input[list]`)
* `ip_cidr_range` (`pulumi.Input[str]`)
* `subnetworkRangeName` (`pulumi.Input[str]`)
* `network` (`pulumi.Input[str]`)
* `networkIp` (`pulumi.Input[str]`)
* `subnetwork` (`pulumi.Input[str]`)
* `subnetworkProject` (`pulumi.Input[str]`)
The **scheduling** object supports the following:
* `automaticRestart` (`pulumi.Input[bool]`)
* `nodeAffinities` (`pulumi.Input[list]`)
* `key` (`pulumi.Input[str]`)
* `operator` (`pulumi.Input[str]`)
* `values` (`pulumi.Input[list]`)
* `onHostMaintenance` (`pulumi.Input[str]`)
* `preemptible` (`pulumi.Input[bool]`)
The **service_account** object supports the following:
* `email` (`pulumi.Input[str]`)
* `scopes` (`pulumi.Input[list]`)
The **shielded_instance_config** object supports the following:
* `enableIntegrityMonitoring` (`pulumi.Input[bool]`)
* `enableSecureBoot` (`pulumi.Input[bool]`)
* `enableVtpm` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_instance_template.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['can_ip_forward'] = can_ip_forward
__props__['description'] = description
if disks is None:
raise TypeError("Missing required property 'disks'")
__props__['disks'] = disks
__props__['enable_display'] = enable_display
__props__['guest_accelerators'] = guest_accelerators
__props__['instance_description'] = instance_description
__props__['labels'] = labels
if machine_type is None:
raise TypeError("Missing required property 'machine_type'")
__props__['machine_type'] = machine_type
__props__['metadata'] = metadata
__props__['metadata_startup_script'] = metadata_startup_script
__props__['min_cpu_platform'] = min_cpu_platform
__props__['name'] = name
__props__['name_prefix'] = name_prefix
__props__['network_interfaces'] = network_interfaces
__props__['project'] = project
__props__['region'] = region
__props__['scheduling'] = scheduling
__props__['service_account'] = service_account
__props__['shielded_instance_config'] = shielded_instance_config
__props__['tags'] = tags
__props__['metadata_fingerprint'] = None
__props__['self_link'] = None
__props__['tags_fingerprint'] = None
super(InstanceTemplate, __self__).__init__(
'gcp:compute/instanceTemplate:InstanceTemplate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, can_ip_forward=None, description=None, disks=None, enable_display=None, guest_accelerators=None, instance_description=None, labels=None, machine_type=None, metadata=None, metadata_fingerprint=None, metadata_startup_script=None, min_cpu_platform=None, name=None, name_prefix=None, network_interfaces=None, project=None, region=None, scheduling=None, self_link=None, service_account=None, shielded_instance_config=None, tags=None, tags_fingerprint=None):
"""
Get an existing InstanceTemplate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] can_ip_forward: Whether to allow sending and receiving of
packets with non-matching source or destination IPs. This defaults to false.
:param pulumi.Input[str] description: A brief description of this resource.
:param pulumi.Input[list] disks: Disks to attach to instances created from this template.
This can be specified multiple times for multiple disks. Structure is
documented below.
:param pulumi.Input[bool] enable_display: Enable [Virtual Displays](https://cloud.google.com/compute/docs/instances/enable-instance-virtual-display#verify_display_driver) on this instance.
**Note**: `allow_stopping_for_update` must be set to true in order to update this field.
:param pulumi.Input[list] guest_accelerators: List of the type and count of accelerator cards attached to the instance. Structure documented below.
:param pulumi.Input[str] instance_description: A brief description to use for instances
created from this template.
:param pulumi.Input[dict] labels: A set of key/value label pairs to assign to instances
created from this template.
:param pulumi.Input[str] machine_type: The machine type to create.
:param pulumi.Input[dict] metadata: Metadata key/value pairs to make available from
within instances created from this template.
:param pulumi.Input[str] metadata_fingerprint: The unique fingerprint of the metadata.
:param pulumi.Input[str] metadata_startup_script: An alternative to using the
startup-script metadata key, mostly to match the compute_instance resource.
This replaces the startup-script metadata key on the created instance and
thus the two mechanisms are not allowed to be used simultaneously.
:param pulumi.Input[str] min_cpu_platform: Specifies a minimum CPU platform. Applicable values are the friendly names of CPU platforms, such as
`Intel Haswell` or `Intel Skylake`. See the complete list [here](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform).
:param pulumi.Input[str] name: The name of the instance template. If you leave
this blank, this provider will auto-generate a unique name.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
prefix. Conflicts with `name`.
:param pulumi.Input[list] network_interfaces: Networks to attach to instances created from
this template. This can be specified multiple times for multiple networks.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: An instance template is a global resource that is not
bound to a zone or a region. However, you can still specify some regional
resources in an instance template, which restricts the template to the
region where that resource resides. For example, a custom `subnetwork`
resource is tied to a specific region. Defaults to the region of the
Provider if no value is given.
:param pulumi.Input[dict] scheduling: The scheduling strategy to use. More details about
this configuration option are detailed below.
:param pulumi.Input[str] self_link: The URI of the created resource.
:param pulumi.Input[dict] service_account: Service account to attach to the instance. Structure is documented below.
:param pulumi.Input[dict] shielded_instance_config: Enable [Shielded VM](https://cloud.google.com/security/shielded-cloud/shielded-vm) on this instance. Shielded VM provides verifiable integrity to protect against malware and rootkits. Defaults to disabled. Structure is documented below.
**Note**: `shielded_instance_config` can only be used with boot images with shielded vm support. See the complete list [here](https://cloud.google.com/compute/docs/images#shielded-images).
:param pulumi.Input[list] tags: Tags to attach to the instance.
:param pulumi.Input[str] tags_fingerprint: The unique fingerprint of the tags.
The **disks** object supports the following:
* `autoDelete` (`pulumi.Input[bool]`)
* `boot` (`pulumi.Input[bool]`)
* `device_name` (`pulumi.Input[str]`)
* `disk_encryption_key` (`pulumi.Input[dict]`)
* `kmsKeySelfLink` (`pulumi.Input[str]`)
* `diskName` (`pulumi.Input[str]`)
* `disk_size_gb` (`pulumi.Input[float]`)
* `diskType` (`pulumi.Input[str]`)
* `interface` (`pulumi.Input[str]`)
* `labels` (`pulumi.Input[dict]`) - A set of key/value label pairs to assign to instances
created from this template.
* `mode` (`pulumi.Input[str]`)
* `source` (`pulumi.Input[str]`)
* `sourceImage` (`pulumi.Input[str]`)
* `type` (`pulumi.Input[str]`)
The **guest_accelerators** object supports the following:
* `count` (`pulumi.Input[float]`)
* `type` (`pulumi.Input[str]`)
The **network_interfaces** object supports the following:
* `accessConfigs` (`pulumi.Input[list]`)
* `natIp` (`pulumi.Input[str]`)
* `network_tier` (`pulumi.Input[str]`)
* `aliasIpRanges` (`pulumi.Input[list]`)
* `ip_cidr_range` (`pulumi.Input[str]`)
* `subnetworkRangeName` (`pulumi.Input[str]`)
* `network` (`pulumi.Input[str]`)
* `networkIp` (`pulumi.Input[str]`)
* `subnetwork` (`pulumi.Input[str]`)
* `subnetworkProject` (`pulumi.Input[str]`)
The **scheduling** object supports the following:
* `automaticRestart` (`pulumi.Input[bool]`)
* `nodeAffinities` (`pulumi.Input[list]`)
* `key` (`pulumi.Input[str]`)
* `operator` (`pulumi.Input[str]`)
* `values` (`pulumi.Input[list]`)
* `onHostMaintenance` (`pulumi.Input[str]`)
* `preemptible` (`pulumi.Input[bool]`)
The **service_account** object supports the following:
* `email` (`pulumi.Input[str]`)
* `scopes` (`pulumi.Input[list]`)
The **shielded_instance_config** object supports the following:
* `enableIntegrityMonitoring` (`pulumi.Input[bool]`)
* `enableSecureBoot` (`pulumi.Input[bool]`)
* `enableVtpm` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_instance_template.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["can_ip_forward"] = can_ip_forward
__props__["description"] = description
__props__["disks"] = disks
__props__["enable_display"] = enable_display
__props__["guest_accelerators"] = guest_accelerators
__props__["instance_description"] = instance_description
__props__["labels"] = labels
__props__["machine_type"] = machine_type
__props__["metadata"] = metadata
__props__["metadata_fingerprint"] = metadata_fingerprint
__props__["metadata_startup_script"] = metadata_startup_script
__props__["min_cpu_platform"] = min_cpu_platform
__props__["name"] = name
__props__["name_prefix"] = name_prefix
__props__["network_interfaces"] = network_interfaces
__props__["project"] = project
__props__["region"] = region
__props__["scheduling"] = scheduling
__props__["self_link"] = self_link
__props__["service_account"] = service_account
__props__["shielded_instance_config"] = shielded_instance_config
__props__["tags"] = tags
__props__["tags_fingerprint"] = tags_fingerprint
return InstanceTemplate(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
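A short usage sketch for the `InstanceTemplate` resource above. The constructor requires `disks` and `machine_type`, so the sketch supplies a single boot disk, a machine type, and a default network interface; the image family, machine type, and tags are placeholders.
```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical example: a minimal instance template with a boot disk and
# a network interface on the default network. Values are placeholders.
template = gcp.compute.InstanceTemplate(
    "example-template",
    machine_type="n1-standard-1",
    disks=[{
        "sourceImage": "debian-cloud/debian-9",  # image family shorthand
        "autoDelete": True,
        "boot": True,
    }],
    network_interfaces=[{
        "network": "default",
    }],
    tags=["example"],
)

pulumi.export("instance_template_self_link", template.self_link)
```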
#### File: pulumi_gcp/compute/route.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Route(pulumi.CustomResource):
description: pulumi.Output[str]
dest_range: pulumi.Output[str]
name: pulumi.Output[str]
network: pulumi.Output[str]
next_hop_gateway: pulumi.Output[str]
next_hop_ilb: pulumi.Output[str]
next_hop_instance: pulumi.Output[str]
next_hop_instance_zone: pulumi.Output[str]
"""
(Optional when `next_hop_instance` is
specified) The zone of the instance specified in
`next_hop_instance`. Omit if `next_hop_instance` is specified as
a URL.
"""
next_hop_ip: pulumi.Output[str]
next_hop_network: pulumi.Output[str]
next_hop_vpn_tunnel: pulumi.Output[str]
priority: pulumi.Output[float]
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
tags: pulumi.Output[list]
def __init__(__self__, resource_name, opts=None, description=None, dest_range=None, name=None, network=None, next_hop_gateway=None, next_hop_ilb=None, next_hop_instance=None, next_hop_instance_zone=None, next_hop_ip=None, next_hop_vpn_tunnel=None, priority=None, project=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Create a Route resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] next_hop_instance_zone: (Optional when `next_hop_instance` is
specified) The zone of the instance specified in
`next_hop_instance`. Omit if `next_hop_instance` is specified as
a URL.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_route.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
if dest_range is None:
raise TypeError("Missing required property 'dest_range'")
__props__['dest_range'] = dest_range
__props__['name'] = name
if network is None:
raise TypeError("Missing required property 'network'")
__props__['network'] = network
__props__['next_hop_gateway'] = next_hop_gateway
__props__['next_hop_ilb'] = next_hop_ilb
__props__['next_hop_instance'] = next_hop_instance
__props__['next_hop_instance_zone'] = next_hop_instance_zone
__props__['next_hop_ip'] = next_hop_ip
__props__['next_hop_vpn_tunnel'] = next_hop_vpn_tunnel
__props__['priority'] = priority
__props__['project'] = project
__props__['tags'] = tags
__props__['next_hop_network'] = None
__props__['self_link'] = None
super(Route, __self__).__init__(
'gcp:compute/route:Route',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, description=None, dest_range=None, name=None, network=None, next_hop_gateway=None, next_hop_ilb=None, next_hop_instance=None, next_hop_instance_zone=None, next_hop_ip=None, next_hop_network=None, next_hop_vpn_tunnel=None, priority=None, project=None, self_link=None, tags=None):
"""
Get an existing Route resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] next_hop_instance_zone: (Optional when `next_hop_instance` is
specified) The zone of the instance specified in
`next_hop_instance`. Omit if `next_hop_instance` is specified as
a URL.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] self_link: The URI of the created resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_route.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["description"] = description
__props__["dest_range"] = dest_range
__props__["name"] = name
__props__["network"] = network
__props__["next_hop_gateway"] = next_hop_gateway
__props__["next_hop_ilb"] = next_hop_ilb
__props__["next_hop_instance"] = next_hop_instance
__props__["next_hop_instance_zone"] = next_hop_instance_zone
__props__["next_hop_ip"] = next_hop_ip
__props__["next_hop_network"] = next_hop_network
__props__["next_hop_vpn_tunnel"] = next_hop_vpn_tunnel
__props__["priority"] = priority
__props__["project"] = project
__props__["self_link"] = self_link
__props__["tags"] = tags
return Route(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
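A short usage sketch for the `Route` resource above, assuming it is exposed as `gcp.compute.Route`; the network name and addresses are placeholders:
```python
import pulumi_gcp as gcp

# dest_range and network are the two required properties enforced in
# __init__ above; the remaining values are placeholders.
nat_route = gcp.compute.Route("nat-route",
    dest_range="0.0.0.0/0",
    network="default",
    next_hop_ip="10.128.0.10",
    priority=800,
    tags=["no-ip"])
```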
#### File: pulumi_gcp/compute/security_scan_config.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class SecurityScanConfig(pulumi.CustomResource):
authentication: pulumi.Output[dict]
blacklist_patterns: pulumi.Output[list]
display_name: pulumi.Output[str]
export_to_security_command_center: pulumi.Output[str]
max_qps: pulumi.Output[float]
name: pulumi.Output[str]
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
schedule: pulumi.Output[dict]
starting_urls: pulumi.Output[list]
target_platforms: pulumi.Output[list]
user_agent: pulumi.Output[str]
def __init__(__self__, resource_name, opts=None, authentication=None, blacklist_patterns=None, display_name=None, export_to_security_command_center=None, max_qps=None, project=None, schedule=None, starting_urls=None, target_platforms=None, user_agent=None, __props__=None, __name__=None, __opts__=None):
"""
Create a SecurityScanConfig resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
The **authentication** object supports the following:
* `customAccount` (`pulumi.Input[dict]`)
* `loginUrl` (`pulumi.Input[str]`)
* `password` (`pulumi.Input[str]`)
* `username` (`pulumi.Input[str]`)
* `googleAccount` (`pulumi.Input[dict]`)
* `password` (`pulumi.Input[str]`)
* `username` (`pulumi.Input[str]`)
The **schedule** object supports the following:
* `intervalDurationDays` (`pulumi.Input[float]`)
* `scheduleTime` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/security_scanner_scan_config.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authentication'] = authentication
__props__['blacklist_patterns'] = blacklist_patterns
if display_name is None:
raise TypeError("Missing required property 'display_name'")
__props__['display_name'] = display_name
__props__['export_to_security_command_center'] = export_to_security_command_center
__props__['max_qps'] = max_qps
__props__['project'] = project
__props__['schedule'] = schedule
if starting_urls is None:
raise TypeError("Missing required property 'starting_urls'")
__props__['starting_urls'] = starting_urls
__props__['target_platforms'] = target_platforms
__props__['user_agent'] = user_agent
__props__['name'] = None
super(SecurityScanConfig, __self__).__init__(
'gcp:compute/securityScanConfig:SecurityScanConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, authentication=None, blacklist_patterns=None, display_name=None, export_to_security_command_center=None, max_qps=None, name=None, project=None, schedule=None, starting_urls=None, target_platforms=None, user_agent=None):
"""
Get an existing SecurityScanConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
The **authentication** object supports the following:
* `customAccount` (`pulumi.Input[dict]`)
* `loginUrl` (`pulumi.Input[str]`)
* `password` (`pulumi.Input[str]`)
* `username` (`pulumi.Input[str]`)
* `googleAccount` (`pulumi.Input[dict]`)
* `password` (`pulumi.Input[str]`)
* `username` (`pulumi.Input[str]`)
The **schedule** object supports the following:
* `intervalDurationDays` (`pulumi.Input[float]`)
* `scheduleTime` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/security_scanner_scan_config.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["authentication"] = authentication
__props__["blacklist_patterns"] = blacklist_patterns
__props__["display_name"] = display_name
__props__["export_to_security_command_center"] = export_to_security_command_center
__props__["max_qps"] = max_qps
__props__["name"] = name
__props__["project"] = project
__props__["schedule"] = schedule
__props__["starting_urls"] = starting_urls
__props__["target_platforms"] = target_platforms
__props__["user_agent"] = user_agent
return SecurityScanConfig(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
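A minimal sketch for `SecurityScanConfig`, assuming the exported path matches this file's location (`gcp.compute.SecurityScanConfig`); the URL, QPS, and schedule values are placeholders:
```python
import pulumi_gcp as gcp

# display_name and starting_urls are the required properties checked in
# __init__ above; schedule uses the documented camelCase keys.
scan = gcp.compute.SecurityScanConfig("example-scan",
    display_name="example-scan",
    starting_urls=["https://example.com/"],
    max_qps=10,
    schedule={
        "intervalDurationDays": 7,
    })
```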
#### File: pulumi_gcp/dataproc/job.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Job(pulumi.CustomResource):
driver_controls_files_uri: pulumi.Output[str]
"""
If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_resource_uri`.
"""
driver_output_resource_uri: pulumi.Output[str]
"""
A URI pointing to the location of the stdout of the job's driver program.
"""
force_delete: pulumi.Output[bool]
"""
By default, you can only delete inactive jobs within
Dataproc. Setting this to true, and calling destroy, will ensure that the
job is first cancelled before issuing the delete.
"""
hadoop_config: pulumi.Output[dict]
hive_config: pulumi.Output[dict]
labels: pulumi.Output[dict]
"""
The list of labels (key/value pairs) to add to the job.
"""
pig_config: pulumi.Output[dict]
placement: pulumi.Output[dict]
project: pulumi.Output[str]
"""
The project in which the `cluster` can be found and jobs
subsequently run against. If it is not provided, the provider project is used.
"""
pyspark_config: pulumi.Output[dict]
reference: pulumi.Output[dict]
region: pulumi.Output[str]
"""
The Cloud Dataproc region. This essentially determines which clusters are available
for this job to be submitted to. If not specified, defaults to `global`.
"""
scheduling: pulumi.Output[dict]
spark_config: pulumi.Output[dict]
sparksql_config: pulumi.Output[dict]
status: pulumi.Output[dict]
def __init__(__self__, resource_name, opts=None, force_delete=None, hadoop_config=None, hive_config=None, labels=None, pig_config=None, placement=None, project=None, pyspark_config=None, reference=None, region=None, scheduling=None, spark_config=None, sparksql_config=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a job resource within a Dataproc cluster within GCE. For more information see
[the official dataproc documentation](https://cloud.google.com/dataproc/).
!> **Note:** This resource does not support 'update' and changing any attributes will cause the resource to be recreated.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] force_delete: By default, you can only delete inactive jobs within
Dataproc. Setting this to true, and calling destroy, will ensure that the
job is first cancelled before issuing the delete.
:param pulumi.Input[dict] labels: The list of labels (key/value pairs) to add to the job.
:param pulumi.Input[str] project: The project in which the `cluster` can be found and jobs
subsequently run against. If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Cloud Dataproc region. This essentially determines which clusters are available
for this job to be submitted to. If not specified, defaults to `global`.
The **hadoop_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainClass` (`pulumi.Input[str]`)
* `mainJarFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
The **hive_config** object supports the following:
* `continueOnFailure` (`pulumi.Input[bool]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
The **pig_config** object supports the following:
* `continueOnFailure` (`pulumi.Input[bool]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
The **placement** object supports the following:
* `clusterName` (`pulumi.Input[str]`)
* `clusterUuid` (`pulumi.Input[str]`)
The **pyspark_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainPythonFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
* `pythonFileUris` (`pulumi.Input[list]`)
The **reference** object supports the following:
* `job_id` (`pulumi.Input[str]`)
The **scheduling** object supports the following:
* `maxFailuresPerHour` (`pulumi.Input[float]`)
The **spark_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainClass` (`pulumi.Input[str]`)
* `mainJarFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
The **sparksql_config** object supports the following:
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_job.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['force_delete'] = force_delete
__props__['hadoop_config'] = hadoop_config
__props__['hive_config'] = hive_config
__props__['labels'] = labels
__props__['pig_config'] = pig_config
if placement is None:
raise TypeError("Missing required property 'placement'")
__props__['placement'] = placement
__props__['project'] = project
__props__['pyspark_config'] = pyspark_config
__props__['reference'] = reference
__props__['region'] = region
__props__['scheduling'] = scheduling
__props__['spark_config'] = spark_config
__props__['sparksql_config'] = sparksql_config
__props__['driver_controls_files_uri'] = None
__props__['driver_output_resource_uri'] = None
__props__['status'] = None
super(Job, __self__).__init__(
'gcp:dataproc/job:Job',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, driver_controls_files_uri=None, driver_output_resource_uri=None, force_delete=None, hadoop_config=None, hive_config=None, labels=None, pig_config=None, placement=None, project=None, pyspark_config=None, reference=None, region=None, scheduling=None, spark_config=None, sparksql_config=None, status=None):
"""
Get an existing Job resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] driver_controls_files_uri: If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_resource_uri`.
:param pulumi.Input[str] driver_output_resource_uri: A URI pointing to the location of the stdout of the job's driver program.
:param pulumi.Input[bool] force_delete: By default, you can only delete inactive jobs within
Dataproc. Setting this to true, and calling destroy, will ensure that the
job is first cancelled before issuing the delete.
:param pulumi.Input[dict] labels: The list of labels (key/value pairs) to add to the job.
:param pulumi.Input[str] project: The project in which the `cluster` can be found and jobs
subsequently run against. If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Cloud Dataproc region. This essentially determines which clusters are available
for this job to be submitted to. If not specified, defaults to `global`.
The **hadoop_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainClass` (`pulumi.Input[str]`)
* `mainJarFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
The **hive_config** object supports the following:
* `continueOnFailure` (`pulumi.Input[bool]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
The **pig_config** object supports the following:
* `continueOnFailure` (`pulumi.Input[bool]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
The **placement** object supports the following:
* `clusterName` (`pulumi.Input[str]`)
* `clusterUuid` (`pulumi.Input[str]`)
The **pyspark_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainPythonFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
* `pythonFileUris` (`pulumi.Input[list]`)
The **reference** object supports the following:
* `job_id` (`pulumi.Input[str]`)
The **scheduling** object supports the following:
* `maxFailuresPerHour` (`pulumi.Input[float]`)
The **spark_config** object supports the following:
* `archiveUris` (`pulumi.Input[list]`)
* `args` (`pulumi.Input[list]`)
* `fileUris` (`pulumi.Input[list]`)
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `mainClass` (`pulumi.Input[str]`)
* `mainJarFileUri` (`pulumi.Input[str]`)
* `properties` (`pulumi.Input[dict]`)
The **sparksql_config** object supports the following:
* `jarFileUris` (`pulumi.Input[list]`)
* `loggingConfig` (`pulumi.Input[dict]`)
* `driverLogLevels` (`pulumi.Input[dict]`)
* `properties` (`pulumi.Input[dict]`)
* `queryFileUri` (`pulumi.Input[str]`)
* `queryLists` (`pulumi.Input[list]`)
* `scriptVariables` (`pulumi.Input[dict]`)
The **status** object supports the following:
* `details` (`pulumi.Input[str]`)
* `state` (`pulumi.Input[str]`)
* `stateStartTime` (`pulumi.Input[str]`)
* `substate` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_job.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["driver_controls_files_uri"] = driver_controls_files_uri
__props__["driver_output_resource_uri"] = driver_output_resource_uri
__props__["force_delete"] = force_delete
__props__["hadoop_config"] = hadoop_config
__props__["hive_config"] = hive_config
__props__["labels"] = labels
__props__["pig_config"] = pig_config
__props__["placement"] = placement
__props__["project"] = project
__props__["pyspark_config"] = pyspark_config
__props__["reference"] = reference
__props__["region"] = region
__props__["scheduling"] = scheduling
__props__["spark_config"] = spark_config
__props__["sparksql_config"] = sparksql_config
__props__["status"] = status
return Job(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
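A minimal PySpark job sketch for the `Job` resource above, assuming it is exposed as `gcp.dataproc.Job`; the cluster name, region, and GCS path are placeholders:
```python
import pulumi_gcp as gcp

# placement is the only required property checked in __init__ above; the
# nested dicts use the camelCase keys from the docstring (clusterName,
# mainPythonFileUri, ...).
pyspark_job = gcp.dataproc.Job("pyspark-job",
    region="us-central1",
    force_delete=True,
    placement={
        "clusterName": "example-cluster",
    },
    pyspark_config={
        "mainPythonFileUri": "gs://example-bucket/wordcount.py",
        "properties": {
            "spark.logConf": "true",
        },
    })
```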
#### File: pulumi_gcp/iam/get_rule.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetRuleResult:
"""
A collection of values returned by getRule.
"""
def __init__(__self__, included_permissions=None, name=None, stage=None, title=None, id=None):
if included_permissions and not isinstance(included_permissions, list):
raise TypeError("Expected argument 'included_permissions' to be a list")
__self__.included_permissions = included_permissions
"""
specifies the list of one or more permissions to include in the custom role, such as `iam.roles.get`
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if stage and not isinstance(stage, str):
raise TypeError("Expected argument 'stage' to be a str")
__self__.stage = stage
"""
indicates the stage of a role in the launch lifecycle, such as `GA`, `BETA` or `ALPHA`.
"""
if title and not isinstance(title, str):
raise TypeError("Expected argument 'title' to be a str")
__self__.title = title
"""
is a friendly title for the role, such as "Role Viewer"
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetRuleResult(GetRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRuleResult(
included_permissions=self.included_permissions,
name=self.name,
stage=self.stage,
title=self.title,
id=self.id)
def get_rule(name=None,opts=None):
"""
Use this data source to get information about a Google IAM Role.
:param str name: The name of the Role to lookup in the form `roles/{ROLE_NAME}`, `organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}` or `projects/{PROJECT_ID}/roles/{ROLE_NAME}`
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/iam_role.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:iam/getRule:getRule', __args__, opts=opts).value
return AwaitableGetRuleResult(
included_permissions=__ret__.get('includedPermissions'),
name=__ret__.get('name'),
stage=__ret__.get('stage'),
title=__ret__.get('title'),
id=__ret__.get('id'))
```
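A usage sketch for `get_rule`, assuming it is exposed as `gcp.iam.get_rule`; the role name is just an example of the documented `roles/{ROLE_NAME}` form:
```python
import pulumi
import pulumi_gcp as gcp

# Look up a predefined role and export its permission list.
viewer = gcp.iam.get_rule(name="roles/viewer")
pulumi.export("viewer_permissions", viewer.included_permissions)
```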
#### File: pulumi_gcp/kms/get_kms_crypto_key.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetKMSCryptoKeyResult:
"""
A collection of values returned by getKMSCryptoKey.
"""
def __init__(__self__, key_ring=None, labels=None, name=None, purpose=None, rotation_period=None, self_link=None, version_templates=None, id=None):
if key_ring and not isinstance(key_ring, str):
raise TypeError("Expected argument 'key_ring' to be a str")
__self__.key_ring = key_ring
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
__self__.labels = labels
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if purpose and not isinstance(purpose, str):
raise TypeError("Expected argument 'purpose' to be a str")
__self__.purpose = purpose
"""
Defines the cryptographic capabilities of the key.
"""
if rotation_period and not isinstance(rotation_period, str):
raise TypeError("Expected argument 'rotation_period' to be a str")
__self__.rotation_period = rotation_period
"""
Every time this period passes, generate a new CryptoKeyVersion and set it as
the primary. The first rotation will take place after the specified period. The rotation period has the format
of a decimal number with up to 9 fractional digits, followed by the letter s (seconds).
"""
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
__self__.self_link = self_link
"""
The self link of the created CryptoKey. Its format is `projects/{projectId}/locations/{location}/keyRings/{keyRingName}/cryptoKeys/{cryptoKeyName}`.
"""
if version_templates and not isinstance(version_templates, list):
raise TypeError("Expected argument 'version_templates' to be a list")
__self__.version_templates = version_templates
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetKMSCryptoKeyResult(GetKMSCryptoKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetKMSCryptoKeyResult(
key_ring=self.key_ring,
labels=self.labels,
name=self.name,
purpose=self.purpose,
rotation_period=self.rotation_period,
self_link=self.self_link,
version_templates=self.version_templates,
id=self.id)
def get_kms_crypto_key(key_ring=None,name=None,opts=None):
"""
Provides access to a Google Cloud Platform KMS CryptoKey. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys).
A CryptoKey is an interface to key material which can be used to encrypt and decrypt data. A CryptoKey belongs to a
Google Cloud KMS KeyRing.
:param str key_ring: The `self_link` of the Google Cloud Platform KeyRing to which the key belongs.
:param str name: The CryptoKey's name.
A CryptoKey’s name must exist within the specified Google Cloud Platform KeyRing and match the regular expression `[a-zA-Z0-9_-]{1,63}`
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/kms_crypto_key.html.markdown.
"""
__args__ = dict()
__args__['keyRing'] = key_ring
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:kms/getKMSCryptoKey:getKMSCryptoKey', __args__, opts=opts).value
return AwaitableGetKMSCryptoKeyResult(
key_ring=__ret__.get('keyRing'),
labels=__ret__.get('labels'),
name=__ret__.get('name'),
purpose=__ret__.get('purpose'),
rotation_period=__ret__.get('rotationPeriod'),
self_link=__ret__.get('selfLink'),
version_templates=__ret__.get('versionTemplates'),
id=__ret__.get('id'))
```
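A lookup sketch for `get_kms_crypto_key`, assuming it is exposed as `gcp.kms.get_kms_crypto_key`; in practice `key_ring` would usually come from `get_kms_key_ring` (next file), and the identifiers below are placeholders:
```python
import pulumi
import pulumi_gcp as gcp

# key_ring takes the KeyRing's self_link; both identifiers are placeholders.
my_key = gcp.kms.get_kms_crypto_key(
    key_ring="projects/my-project/locations/global/keyRings/my-ring",
    name="my-crypto-key")
pulumi.export("crypto_key_self_link", my_key.self_link)
```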
#### File: pulumi_gcp/kms/get_kms_key_ring.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetKMSKeyRingResult:
"""
A collection of values returned by getKMSKeyRing.
"""
def __init__(__self__, location=None, name=None, project=None, self_link=None, id=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
__self__.location = location
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
__self__.project = project
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
__self__.self_link = self_link
"""
The self link of the created KeyRing. Its format is `projects/{projectId}/locations/{location}/keyRings/{keyRingName}`.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetKMSKeyRingResult(GetKMSKeyRingResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetKMSKeyRingResult(
location=self.location,
name=self.name,
project=self.project,
self_link=self.self_link,
id=self.id)
def get_kms_key_ring(location=None,name=None,project=None,opts=None):
"""
Provides access to Google Cloud Platform KMS KeyRing. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_ring)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings).
A KeyRing is a grouping of CryptoKeys for organizational purposes. A KeyRing belongs to a Google Cloud Platform Project
and resides in a specific location.
:param str location: The Google Cloud Platform location for the KeyRing.
A full list of valid locations can be found by running `gcloud kms locations list`.
:param str name: The KeyRing's name.
A KeyRing name must exist within the provided location and match the regular expression `[a-zA-Z0-9_-]{1,63}`
:param str project: The project in which the resource belongs. If it
is not provided, the provider project is used.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/kms_key_ring.html.markdown.
"""
__args__ = dict()
__args__['location'] = location
__args__['name'] = name
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:kms/getKMSKeyRing:getKMSKeyRing', __args__, opts=opts).value
return AwaitableGetKMSKeyRingResult(
location=__ret__.get('location'),
name=__ret__.get('name'),
project=__ret__.get('project'),
self_link=__ret__.get('selfLink'),
id=__ret__.get('id'))
```
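A matching sketch for `get_kms_key_ring`, assuming the `gcp.kms.get_kms_key_ring` export; the location and name are placeholders:
```python
import pulumi
import pulumi_gcp as gcp

# Resolve an existing KeyRing and export its self_link, which is the value
# expected by get_kms_crypto_key's key_ring argument above.
my_ring = gcp.kms.get_kms_key_ring(location="global", name="my-ring")
pulumi.export("key_ring_self_link", my_ring.self_link)
```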
#### File: pulumi_gcp/kms/registry.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Registry(pulumi.CustomResource):
credentials: pulumi.Output[list]
"""
List of public key certificates to authenticate devices. Structure is documented below.
* `publicKeyCertificate` (`dict`)
* `certificate` (`str`)
* `format` (`str`)
"""
event_notification_configs: pulumi.Output[list]
"""
List of configurations for event notification, such as
PubSub topics to publish device events to. Structure is documented below.
* `pubsub_topic_name` (`str`)
* `subfolderMatches` (`str`)
"""
http_config: pulumi.Output[dict]
"""
Activate or deactivate HTTP. Structure is documented below.
* `http_enabled_state` (`str`)
"""
log_level: pulumi.Output[str]
mqtt_config: pulumi.Output[dict]
"""
Activate or deactivate MQTT. Structure is documented below.
* `mqtt_enabled_state` (`str`)
"""
name: pulumi.Output[str]
"""
A unique name for the resource, required by device registry.
Changing this forces a new resource to be created.
"""
project: pulumi.Output[str]
"""
The project in which the resource belongs. If it is not provided, the provider project is used.
"""
region: pulumi.Output[str]
"""
The Region in which the created address should reside. If it is not provided, the provider region is used.
"""
state_notification_config: pulumi.Output[dict]
"""
A PubSub topic to publish device state updates. Structure is documented below.
* `pubsub_topic_name` (`str`)
"""
def __init__(__self__, resource_name, opts=None, credentials=None, event_notification_configs=None, http_config=None, log_level=None, mqtt_config=None, name=None, project=None, region=None, state_notification_config=None, __props__=None, __name__=None, __opts__=None):
"""
Creates a device registry in Google's Cloud IoT Core platform. For more information see
[the official documentation](https://cloud.google.com/iot/docs/) and
[API](https://cloud.google.com/iot/docs/reference/cloudiot/rest/v1/projects.locations.registries).
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] credentials: List of public key certificates to authenticate devices. Structure is documented below.
:param pulumi.Input[list] event_notification_configs: List of configurations for event notification, such as
PubSub topics to publish device events to. Structure is documented below.
:param pulumi.Input[dict] http_config: Activate or deactivate HTTP. Structure is documented below.
:param pulumi.Input[dict] mqtt_config: Activate or deactivate MQTT. Structure is documented below.
:param pulumi.Input[str] name: A unique name for the resource, required by device registry.
Changing this forces a new resource to be created.
:param pulumi.Input[str] project: The project in which the resource belongs. If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Region in which the created address should reside. If it is not provided, the provider region is used.
:param pulumi.Input[dict] state_notification_config: A PubSub topic to publish device state updates. Structure is documented below.
The **credentials** object supports the following:
* `publicKeyCertificate` (`pulumi.Input[dict]`)
* `certificate` (`pulumi.Input[str]`)
* `format` (`pulumi.Input[str]`)
The **event_notification_configs** object supports the following:
* `pubsub_topic_name` (`pulumi.Input[str]`)
* `subfolderMatches` (`pulumi.Input[str]`)
The **http_config** object supports the following:
* `http_enabled_state` (`pulumi.Input[str]`)
The **mqtt_config** object supports the following:
* `mqtt_enabled_state` (`pulumi.Input[str]`)
The **state_notification_config** object supports the following:
* `pubsub_topic_name` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/cloudiot_registry.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['credentials'] = credentials
__props__['event_notification_configs'] = event_notification_configs
__props__['http_config'] = http_config
__props__['log_level'] = log_level
__props__['mqtt_config'] = mqtt_config
__props__['name'] = name
__props__['project'] = project
__props__['region'] = region
__props__['state_notification_config'] = state_notification_config
super(Registry, __self__).__init__(
'gcp:kms/registry:Registry',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, credentials=None, event_notification_configs=None, http_config=None, log_level=None, mqtt_config=None, name=None, project=None, region=None, state_notification_config=None):
"""
Get an existing Registry resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] credentials: List of public key certificates to authenticate devices. Structure is documented below.
:param pulumi.Input[list] event_notification_configs: List of configurations for event notification, such as
PubSub topics to publish device events to. Structure is documented below.
:param pulumi.Input[dict] http_config: Activate or deactivate HTTP. Structure is documented below.
:param pulumi.Input[dict] mqtt_config: Activate or deactivate MQTT. Structure is documented below.
:param pulumi.Input[str] name: A unique name for the resource, required by device registry.
Changing this forces a new resource to be created.
:param pulumi.Input[str] project: The project in which the resource belongs. If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Region in which the created address should reside. If it is not provided, the provider region is used.
:param pulumi.Input[dict] state_notification_config: A PubSub topic to publish device state updates. Structure is documented below.
The **credentials** object supports the following:
* `publicKeyCertificate` (`pulumi.Input[dict]`)
* `certificate` (`pulumi.Input[str]`)
* `format` (`pulumi.Input[str]`)
The **event_notification_configs** object supports the following:
* `pubsub_topic_name` (`pulumi.Input[str]`)
* `subfolderMatches` (`pulumi.Input[str]`)
The **http_config** object supports the following:
* `http_enabled_state` (`pulumi.Input[str]`)
The **mqtt_config** object supports the following:
* `mqtt_enabled_state` (`pulumi.Input[str]`)
The **state_notification_config** object supports the following:
* `pubsub_topic_name` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/cloudiot_registry.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["credentials"] = credentials
__props__["event_notification_configs"] = event_notification_configs
__props__["http_config"] = http_config
__props__["log_level"] = log_level
__props__["mqtt_config"] = mqtt_config
__props__["name"] = name
__props__["project"] = project
__props__["region"] = region
__props__["state_notification_config"] = state_notification_config
return Registry(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
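A minimal sketch for the Cloud IoT `Registry` above, assuming it is exposed under this file's module path as `gcp.kms.Registry`; the topic names are placeholders and the enabled-state strings follow the Cloud IoT enum values:
```python
import pulumi_gcp as gcp

# Every property of Registry is optional in __init__ above; the dict keys
# below follow the docstring (pubsub_topic_name, mqtt_enabled_state, ...).
registry = gcp.kms.Registry("example-registry",
    event_notification_configs=[{
        "pubsub_topic_name": "projects/my-project/topics/device-events",
    }],
    state_notification_config={
        "pubsub_topic_name": "projects/my-project/topics/device-state",
    },
    mqtt_config={
        "mqtt_enabled_state": "MQTT_ENABLED",
    },
    http_config={
        "http_enabled_state": "HTTP_ENABLED",
    })
```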
#### File: pulumi_gcp/logging/billing_account_sink.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class BillingAccountSink(pulumi.CustomResource):
bigquery_options: pulumi.Output[dict]
"""
Options that affect sinks exporting data to BigQuery. Structure documented below.
* `usePartitionedTables` (`bool`)
"""
billing_account: pulumi.Output[str]
"""
The billing account exported to the sink.
"""
destination: pulumi.Output[str]
"""
The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, or a BigQuery dataset. Examples:
The writer associated with the sink must have access to write to the above resource.
"""
filter: pulumi.Output[str]
"""
The filter to apply when exporting logs. Only log entries that match the filter are exported.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to
write a filter.
"""
name: pulumi.Output[str]
"""
The name of the logging sink.
"""
writer_identity: pulumi.Output[str]
"""
The identity associated with this sink. This identity must be granted write access to the
configured `destination`.
"""
def __init__(__self__, resource_name, opts=None, bigquery_options=None, billing_account=None, destination=None, filter=None, name=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a billing account logging sink. For more information see
[the official documentation](https://cloud.google.com/logging/docs/) and
[Exporting Logs in the API](https://cloud.google.com/logging/docs/api/tasks/exporting-logs).
> **Note** You must have the "Logs Configuration Writer" IAM role (`roles/logging.configWriter`)
[granted on the billing account](https://cloud.google.com/billing/reference/rest/v1/billingAccounts/getIamPolicy) to
the credentials used with this provider. [IAM roles granted on a billing account](https://cloud.google.com/billing/docs/how-to/billing-access) are separate from the
typical IAM roles granted on a project.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] bigquery_options: Options that affect sinks exporting data to BigQuery. Structure documented below.
:param pulumi.Input[str] billing_account: The billing account exported to the sink.
:param pulumi.Input[str] destination: The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, or a BigQuery dataset. Examples:
The writer associated with the sink must have access to write to the above resource.
:param pulumi.Input[str] filter: The filter to apply when exporting logs. Only log entries that match the filter are exported.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to
write a filter.
:param pulumi.Input[str] name: The name of the logging sink.
The **bigquery_options** object supports the following:
* `usePartitionedTables` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/logging_billing_account_sink.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['bigquery_options'] = bigquery_options
if billing_account is None:
raise TypeError("Missing required property 'billing_account'")
__props__['billing_account'] = billing_account
if destination is None:
raise TypeError("Missing required property 'destination'")
__props__['destination'] = destination
__props__['filter'] = filter
__props__['name'] = name
__props__['writer_identity'] = None
super(BillingAccountSink, __self__).__init__(
'gcp:logging/billingAccountSink:BillingAccountSink',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, bigquery_options=None, billing_account=None, destination=None, filter=None, name=None, writer_identity=None):
"""
Get an existing BillingAccountSink resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] bigquery_options: Options that affect sinks exporting data to BigQuery. Structure documented below.
:param pulumi.Input[str] billing_account: The billing account exported to the sink.
:param pulumi.Input[str] destination: The destination of the sink (or, in other words, where logs are written to). Can be a
Cloud Storage bucket, a PubSub topic, or a BigQuery dataset. Examples:
The writer associated with the sink must have access to write to the above resource.
:param pulumi.Input[str] filter: The filter to apply when exporting logs. Only log entries that match the filter are exported.
See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to
write a filter.
:param pulumi.Input[str] name: The name of the logging sink.
:param pulumi.Input[str] writer_identity: The identity associated with this sink. This identity must be granted write access to the
configured `destination`.
The **bigquery_options** object supports the following:
* `usePartitionedTables` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/logging_billing_account_sink.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["bigquery_options"] = bigquery_options
__props__["billing_account"] = billing_account
__props__["destination"] = destination
__props__["filter"] = filter
__props__["name"] = name
__props__["writer_identity"] = writer_identity
return BillingAccountSink(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
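A creation sketch for `BillingAccountSink`, assuming the `gcp.logging.BillingAccountSink` export; the billing account ID and bucket are placeholders, and the destination uses the Cloud Storage `storage.googleapis.com/<bucket>` form:
```python
import pulumi
import pulumi_gcp as gcp

# billing_account and destination are the required properties checked in
# __init__ above; writer_identity is computed and exported for IAM grants.
sink = gcp.logging.BillingAccountSink("example-sink",
    billing_account="ABCDEF-012345-6789AB",
    destination="storage.googleapis.com/example-logs-bucket",
    filter="severity>=ERROR")
pulumi.export("sink_writer_identity", sink.writer_identity)
```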
#### File: pulumi_gcp/organizations/get_active_folder.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetActiveFolderResult:
"""
A collection of values returned by getActiveFolder.
"""
def __init__(__self__, display_name=None, name=None, parent=None, id=None):
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
__self__.display_name = display_name
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
The resource name of the Folder. This uniquely identifies the folder.
"""
if parent and not isinstance(parent, str):
raise TypeError("Expected argument 'parent' to be a str")
__self__.parent = parent
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetActiveFolderResult(GetActiveFolderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetActiveFolderResult(
display_name=self.display_name,
name=self.name,
parent=self.parent,
id=self.id)
def get_active_folder(display_name=None,parent=None,opts=None):
"""
Get an active folder within GCP by `display_name` and `parent`.
:param str display_name: The folder's display name.
:param str parent: The resource name of the parent Folder or Organization.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/active_folder.html.markdown.
"""
__args__ = dict()
__args__['displayName'] = display_name
__args__['parent'] = parent
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:organizations/getActiveFolder:getActiveFolder', __args__, opts=opts).value
return AwaitableGetActiveFolderResult(
display_name=__ret__.get('displayName'),
name=__ret__.get('name'),
parent=__ret__.get('parent'),
id=__ret__.get('id'))
```
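A lookup sketch for `get_active_folder`, assuming the `gcp.organizations.get_active_folder` export; the display name and parent organization are placeholders:
```python
import pulumi
import pulumi_gcp as gcp

# parent takes the resource name of the parent Folder or Organization.
dept = gcp.organizations.get_active_folder(
    display_name="Engineering",
    parent="organizations/1234567")
pulumi.export("folder_resource_name", dept.name)
```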
#### File: pulumi_gcp/organizations/get_folder.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetFolderResult:
"""
A collection of values returned by getFolder.
"""
def __init__(__self__, create_time=None, display_name=None, folder=None, lifecycle_state=None, lookup_organization=None, name=None, organization=None, parent=None, id=None):
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
__self__.create_time = create_time
"""
Timestamp when the Folder was created. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".
"""
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
__self__.display_name = display_name
"""
The folder's display name.
"""
if folder and not isinstance(folder, str):
raise TypeError("Expected argument 'folder' to be a str")
__self__.folder = folder
if lifecycle_state and not isinstance(lifecycle_state, str):
raise TypeError("Expected argument 'lifecycle_state' to be a str")
__self__.lifecycle_state = lifecycle_state
"""
The Folder's current lifecycle state.
"""
if lookup_organization and not isinstance(lookup_organization, bool):
raise TypeError("Expected argument 'lookup_organization' to be a bool")
__self__.lookup_organization = lookup_organization
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
The resource name of the Folder in the form `folders/{folder_id}`.
"""
if organization and not isinstance(organization, str):
raise TypeError("Expected argument 'organization' to be a str")
__self__.organization = organization
"""
If `lookup_organization` is enabled, the resource name of the Organization that the folder belongs to.
"""
if parent and not isinstance(parent, str):
raise TypeError("Expected argument 'parent' to be a str")
__self__.parent = parent
"""
The resource name of the parent Folder or Organization.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetFolderResult(GetFolderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFolderResult(
create_time=self.create_time,
display_name=self.display_name,
folder=self.folder,
lifecycle_state=self.lifecycle_state,
lookup_organization=self.lookup_organization,
name=self.name,
organization=self.organization,
parent=self.parent,
id=self.id)
def get_folder(folder=None,lookup_organization=None,opts=None):
"""
Use this data source to get information about a Google Cloud Folder.
:param str folder: The name of the Folder in the form `{folder_id}` or `folders/{folder_id}`.
:param bool lookup_organization: `true` to find the organization that the folder belongs to, `false` to avoid the lookup. It searches up the tree. Defaults to `false`.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/folder.html.markdown.
"""
__args__ = dict()
__args__['folder'] = folder
__args__['lookupOrganization'] = lookup_organization
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:organizations/getFolder:getFolder', __args__, opts=opts).value
return AwaitableGetFolderResult(
create_time=__ret__.get('createTime'),
display_name=__ret__.get('displayName'),
folder=__ret__.get('folder'),
lifecycle_state=__ret__.get('lifecycleState'),
lookup_organization=__ret__.get('lookupOrganization'),
name=__ret__.get('name'),
organization=__ret__.get('organization'),
parent=__ret__.get('parent'),
id=__ret__.get('id'))
```
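A lookup sketch for `get_folder`, assuming the `gcp.organizations.get_folder` export; the folder ID is a placeholder:
```python
import pulumi
import pulumi_gcp as gcp

# lookup_organization=True walks up the tree so the owning organization is
# populated on the result, as described in the docstring above.
folder = gcp.organizations.get_folder(
    folder="folders/123456789",
    lookup_organization=True)
pulumi.export("folder_organization", folder.organization)
```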
#### File: pulumi_gcp/organizations/get_organization.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetOrganizationResult:
"""
A collection of values returned by getOrganization.
"""
def __init__(__self__, create_time=None, directory_customer_id=None, domain=None, lifecycle_state=None, name=None, org_id=None, organization=None, id=None):
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
__self__.create_time = create_time
"""
Timestamp when the Organization was created. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".
"""
if directory_customer_id and not isinstance(directory_customer_id, str):
raise TypeError("Expected argument 'directory_customer_id' to be a str")
__self__.directory_customer_id = directory_customer_id
"""
The Google for Work customer ID of the Organization.
"""
if domain and not isinstance(domain, str):
raise TypeError("Expected argument 'domain' to be a str")
__self__.domain = domain
if lifecycle_state and not isinstance(lifecycle_state, str):
raise TypeError("Expected argument 'lifecycle_state' to be a str")
__self__.lifecycle_state = lifecycle_state
"""
The Organization's current lifecycle state.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
The resource name of the Organization in the form `organizations/{organization_id}`.
"""
if org_id and not isinstance(org_id, str):
raise TypeError("Expected argument 'org_id' to be a str")
__self__.org_id = org_id
"""
The Organization ID.
"""
if organization and not isinstance(organization, str):
raise TypeError("Expected argument 'organization' to be a str")
__self__.organization = organization
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetOrganizationResult(GetOrganizationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetOrganizationResult(
create_time=self.create_time,
directory_customer_id=self.directory_customer_id,
domain=self.domain,
lifecycle_state=self.lifecycle_state,
name=self.name,
org_id=self.org_id,
organization=self.organization,
id=self.id)
def get_organization(domain=None,organization=None,opts=None):
"""
Use this data source to get information about a Google Cloud Organization.
:param str domain: The domain name of the Organization.
:param str organization: The name of the Organization in the form `{organization_id}` or `organizations/{organization_id}`.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/organization.html.markdown.
"""
__args__ = dict()
__args__['domain'] = domain
__args__['organization'] = organization
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:organizations/getOrganization:getOrganization', __args__, opts=opts).value
return AwaitableGetOrganizationResult(
create_time=__ret__.get('createTime'),
directory_customer_id=__ret__.get('directoryCustomerId'),
domain=__ret__.get('domain'),
lifecycle_state=__ret__.get('lifecycleState'),
name=__ret__.get('name'),
org_id=__ret__.get('orgId'),
organization=__ret__.get('organization'),
id=__ret__.get('id'))
```
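A lookup sketch for `get_organization`, assuming the `gcp.organizations.get_organization` export; the domain is a placeholder (the `organization` argument could be used instead):
```python
import pulumi
import pulumi_gcp as gcp

# Resolve the organization by its domain and export its numeric ID.
org = gcp.organizations.get_organization(domain="example.com")
pulumi.export("org_id", org.org_id)
```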
#### File: pulumi_gcp/projects/get_project.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetProjectResult:
"""
A collection of values returned by getProject.
"""
def __init__(__self__, filter=None, projects=None, id=None):
if filter and not isinstance(filter, str):
raise TypeError("Expected argument 'filter' to be a str")
__self__.filter = filter
if projects and not isinstance(projects, list):
raise TypeError("Expected argument 'projects' to be a list")
__self__.projects = projects
"""
A list of projects matching the provided filter. Structure is defined below.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetProjectResult(GetProjectResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProjectResult(
filter=self.filter,
projects=self.projects,
id=self.id)
def get_project(filter=None,opts=None):
"""
Retrieve information about a set of projects based on a filter. See the
[REST API](https://cloud.google.com/resource-manager/reference/rest/v1/projects/list)
for more details.
:param str filter: A string filter as defined in the [REST API](https://cloud.google.com/resource-manager/reference/rest/v1/projects/list#query-parameters).
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/projects.html.markdown.
"""
__args__ = dict()
__args__['filter'] = filter
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:projects/getProject:getProject', __args__, opts=opts).value
return AwaitableGetProjectResult(
filter=__ret__.get('filter'),
projects=__ret__.get('projects'),
id=__ret__.get('id'))
```
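A short sketch of the `get_project` data source above; the filter string is illustrative and follows the Resource Manager list-API query syntax linked in the docstring.
```python
import pulumi
import pulumi_gcp as gcp

# Fetch active projects matching an (illustrative) ID-prefix filter.
result = gcp.projects.get_project(filter="id:demo-* lifecycleState:ACTIVE")

# Export the raw list of matching projects returned by the API.
pulumi.export("projects", result.projects)
```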
#### File: pulumi_gcp/sql/ssl_cert.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class SslCert(pulumi.CustomResource):
cert: pulumi.Output[str]
"""
The actual certificate data for this client certificate.
"""
cert_serial_number: pulumi.Output[str]
"""
The serial number extracted from the certificate data.
"""
common_name: pulumi.Output[str]
"""
The common name to be used in the certificate to identify the
client. Constrained to [a-zA-Z.-_ ]+. Changing this forces a new resource to be created.
"""
create_time: pulumi.Output[str]
"""
The time when the certificate was created in RFC 3339 format,
for example 2012-11-15T16:19:00.094Z.
"""
expiration_time: pulumi.Output[str]
"""
The time when the certificate expires in RFC 3339 format,
for example 2012-11-15T16:19:00.094Z.
"""
instance: pulumi.Output[str]
"""
The name of the Cloud SQL instance. Changing this
forces a new resource to be created.
"""
private_key: pulumi.Output[str]
"""
The private key associated with the client certificate.
"""
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
server_ca_cert: pulumi.Output[str]
"""
The CA cert of the server this client cert was generated from.
"""
sha1_fingerprint: pulumi.Output[str]
"""
The SHA1 Fingerprint of the certificate.
"""
def __init__(__self__, resource_name, opts=None, common_name=None, instance=None, project=None, __props__=None, __name__=None, __opts__=None):
"""
Creates a new Google SQL SSL Cert on a Google SQL Instance. For more information, see the [official documentation](https://cloud.google.com/sql/), or the [JSON API](https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/sslCerts).
> **Note:** All arguments including the private key will be stored in the raw state as plain-text.
[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html).
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] common_name: The common name to be used in the certificate to identify the
client. Constrained to [a-zA-Z.-_ ]+. Changing this forces a new resource to be created.
:param pulumi.Input[str] instance: The name of the Cloud SQL instance. Changing this
forces a new resource to be created.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_ssl_cert.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if common_name is None:
raise TypeError("Missing required property 'common_name'")
__props__['common_name'] = common_name
if instance is None:
raise TypeError("Missing required property 'instance'")
__props__['instance'] = instance
__props__['project'] = project
__props__['cert'] = None
__props__['cert_serial_number'] = None
__props__['create_time'] = None
__props__['expiration_time'] = None
__props__['private_key'] = None
__props__['server_ca_cert'] = None
__props__['sha1_fingerprint'] = None
super(SslCert, __self__).__init__(
'gcp:sql/sslCert:SslCert',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, cert=None, cert_serial_number=None, common_name=None, create_time=None, expiration_time=None, instance=None, private_key=None, project=None, server_ca_cert=None, sha1_fingerprint=None):
"""
Get an existing SslCert resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cert: The actual certificate data for this client certificate.
:param pulumi.Input[str] cert_serial_number: The serial number extracted from the certificate data.
:param pulumi.Input[str] common_name: The common name to be used in the certificate to identify the
client. Constrained to [a-zA-Z.-_ ]+. Changing this forces a new resource to be created.
:param pulumi.Input[str] create_time: The time when the certificate was created in RFC 3339 format,
for example 2012-11-15T16:19:00.094Z.
:param pulumi.Input[str] expiration_time: The time when the certificate expires in RFC 3339 format,
for example 2012-11-15T16:19:00.094Z.
:param pulumi.Input[str] instance: The name of the Cloud SQL instance. Changing this
forces a new resource to be created.
:param pulumi.Input[str] private_key: The private key associated with the client certificate.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] server_ca_cert: The CA cert of the server this client cert was generated from.
:param pulumi.Input[str] sha1_fingerprint: The SHA1 Fingerprint of the certificate.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_ssl_cert.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["cert"] = cert
__props__["cert_serial_number"] = cert_serial_number
__props__["common_name"] = common_name
__props__["create_time"] = create_time
__props__["expiration_time"] = expiration_time
__props__["instance"] = instance
__props__["private_key"] = private_key
__props__["project"] = project
__props__["server_ca_cert"] = server_ca_cert
__props__["sha1_fingerprint"] = sha1_fingerprint
return SslCert(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
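A minimal sketch of creating an `SslCert` with the resource above, assuming an existing Cloud SQL instance; the instance name and certificate common name are placeholders.
```python
import pulumi
import pulumi_gcp as gcp

# Issue a client certificate against a pre-existing Cloud SQL instance (placeholder name).
client_cert = gcp.sql.SslCert(
    "example-client-cert",
    common_name="example-client",
    instance="my-existing-instance",
)

# Note: the private key lands in state as plain text, per the resource docs above.
pulumi.export("cert_serial_number", client_cert.cert_serial_number)
```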
#### File: pulumi_gcp/storage/bucket.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Bucket(pulumi.CustomResource):
bucket_policy_only: pulumi.Output[bool]
"""
Enables [Bucket Policy Only](https://cloud.google.com/storage/docs/bucket-policy-only) access to a bucket.
"""
cors: pulumi.Output[list]
"""
The bucket's [Cross-Origin Resource Sharing (CORS)](https://www.w3.org/TR/cors/) configuration. Multiple blocks of this type are permitted. Structure is documented below.
* `maxAgeSeconds` (`float`)
* `methods` (`list`)
* `origins` (`list`)
* `responseHeaders` (`list`)
"""
default_event_based_hold: pulumi.Output[bool]
encryption: pulumi.Output[dict]
"""
The bucket's encryption configuration.
* `defaultKmsKeyName` (`str`)
"""
force_destroy: pulumi.Output[bool]
"""
When deleting a bucket, this
boolean option will delete all contained objects. If you try to delete a
bucket that contains objects, this provider will fail that run.
"""
labels: pulumi.Output[dict]
"""
A set of key/value label pairs to assign to the bucket.
"""
lifecycle_rules: pulumi.Output[list]
"""
The bucket's [Lifecycle Rules](https://cloud.google.com/storage/docs/lifecycle#configuration) configuration. Multiple blocks of this type are permitted. Structure is documented below.
* `action` (`dict`)
* `storage_class` (`str`) - The [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of the new bucket. Supported values include: `STANDARD`, `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`.
* `type` (`str`)
* `condition` (`dict`)
* `age` (`float`)
* `createdBefore` (`str`)
* `matchesStorageClasses` (`list`)
* `numNewerVersions` (`float`)
* `withState` (`str`)
"""
location: pulumi.Output[str]
"""
The [GCS location](https://cloud.google.com/storage/docs/bucket-locations)
"""
logging: pulumi.Output[dict]
"""
The bucket's [Access & Storage Logs](https://cloud.google.com/storage/docs/access-logs) configuration.
* `logBucket` (`str`)
* `logObjectPrefix` (`str`)
"""
name: pulumi.Output[str]
"""
The name of the bucket.
"""
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""
requester_pays: pulumi.Output[bool]
"""
Enables [Requester Pays](https://cloud.google.com/storage/docs/requester-pays) on a storage bucket.
"""
retention_policy: pulumi.Output[dict]
"""
Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Structure is documented below.
* `isLocked` (`bool`)
* `retentionPeriod` (`float`)
"""
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
storage_class: pulumi.Output[str]
"""
The [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of the new bucket. Supported values include: `STANDARD`, `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`.
"""
url: pulumi.Output[str]
"""
The base URL of the bucket, in the format `gs://<bucket-name>`.
"""
versioning: pulumi.Output[dict]
"""
The bucket's [Versioning](https://cloud.google.com/storage/docs/object-versioning) configuration.
* `enabled` (`bool`)
"""
website: pulumi.Output[dict]
"""
Configuration if the bucket acts as a website. Structure is documented below.
* `mainPageSuffix` (`str`)
* `notFoundPage` (`str`)
"""
def __init__(__self__, resource_name, opts=None, bucket_policy_only=None, cors=None, default_event_based_hold=None, encryption=None, force_destroy=None, labels=None, lifecycle_rules=None, location=None, logging=None, name=None, project=None, requester_pays=None, retention_policy=None, storage_class=None, versioning=None, website=None, __props__=None, __name__=None, __opts__=None):
"""
Creates a new bucket in the Google Cloud Storage (GCS) service.
Once a bucket has been created, its location can't be changed.
[ACLs](https://cloud.google.com/storage/docs/access-control/lists) can be applied
using the [`storage.BucketACL`](https://www.terraform.io/docs/providers/google/r/storage_bucket_acl.html) resource.
For more information see
[the official documentation](https://cloud.google.com/storage/docs/overview)
and
[API](https://cloud.google.com/storage/docs/json_api/v1/buckets).
**Note**: If the project id is not set on the resource or in the provider block, it will be determined
dynamically, which requires enabling the Compute API.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] bucket_policy_only: Enables [Bucket Policy Only](https://cloud.google.com/storage/docs/bucket-policy-only) access to a bucket.
:param pulumi.Input[list] cors: The bucket's [Cross-Origin Resource Sharing (CORS)](https://www.w3.org/TR/cors/) configuration. Multiple blocks of this type are permitted. Structure is documented below.
:param pulumi.Input[dict] encryption: The bucket's encryption configuration.
:param pulumi.Input[bool] force_destroy: When deleting a bucket, this
boolean option will delete all contained objects. If you try to delete a
bucket that contains objects, this provider will fail that run.
:param pulumi.Input[dict] labels: A set of key/value label pairs to assign to the bucket.
:param pulumi.Input[list] lifecycle_rules: The bucket's [Lifecycle Rules](https://cloud.google.com/storage/docs/lifecycle#configuration) configuration. Multiple blocks of this type are permitted. Structure is documented below.
:param pulumi.Input[str] location: The [GCS location](https://cloud.google.com/storage/docs/bucket-locations)
:param pulumi.Input[dict] logging: The bucket's [Access & Storage Logs](https://cloud.google.com/storage/docs/access-logs) configuration.
:param pulumi.Input[str] name: The name of the bucket.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[bool] requester_pays: Enables [Requester Pays](https://cloud.google.com/storage/docs/requester-pays) on a storage bucket.
:param pulumi.Input[dict] retention_policy: Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Structure is documented below.
:param pulumi.Input[str] storage_class: The [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of the new bucket. Supported values include: `STANDARD`, `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`.
:param pulumi.Input[dict] versioning: The bucket's [Versioning](https://cloud.google.com/storage/docs/object-versioning) configuration.
:param pulumi.Input[dict] website: Configuration if the bucket acts as a website. Structure is documented below.
The **cors** object supports the following:
* `maxAgeSeconds` (`pulumi.Input[float]`)
* `methods` (`pulumi.Input[list]`)
* `origins` (`pulumi.Input[list]`)
* `responseHeaders` (`pulumi.Input[list]`)
The **encryption** object supports the following:
* `defaultKmsKeyName` (`pulumi.Input[str]`)
The **lifecycle_rules** object supports the following:
* `action` (`pulumi.Input[dict]`)
* `storage_class` (`pulumi.Input[str]`) - The [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of the new bucket. Supported values include: `STANDARD`, `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`.
* `type` (`pulumi.Input[str]`)
* `condition` (`pulumi.Input[dict]`)
* `age` (`pulumi.Input[float]`)
* `createdBefore` (`pulumi.Input[str]`)
* `matchesStorageClasses` (`pulumi.Input[list]`)
* `numNewerVersions` (`pulumi.Input[float]`)
* `withState` (`pulumi.Input[str]`)
The **logging** object supports the following:
* `logBucket` (`pulumi.Input[str]`)
* `logObjectPrefix` (`pulumi.Input[str]`)
The **retention_policy** object supports the following:
* `isLocked` (`pulumi.Input[bool]`)
* `retentionPeriod` (`pulumi.Input[float]`)
The **versioning** object supports the following:
* `enabled` (`pulumi.Input[bool]`)
The **website** object supports the following:
* `mainPageSuffix` (`pulumi.Input[str]`)
* `notFoundPage` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/storage_bucket.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['bucket_policy_only'] = bucket_policy_only
__props__['cors'] = cors
__props__['default_event_based_hold'] = default_event_based_hold
__props__['encryption'] = encryption
__props__['force_destroy'] = force_destroy
__props__['labels'] = labels
__props__['lifecycle_rules'] = lifecycle_rules
__props__['location'] = location
__props__['logging'] = logging
__props__['name'] = name
__props__['project'] = project
__props__['requester_pays'] = requester_pays
__props__['retention_policy'] = retention_policy
__props__['storage_class'] = storage_class
__props__['versioning'] = versioning
__props__['website'] = website
__props__['self_link'] = None
__props__['url'] = None
super(Bucket, __self__).__init__(
'gcp:storage/bucket:Bucket',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, bucket_policy_only=None, cors=None, default_event_based_hold=None, encryption=None, force_destroy=None, labels=None, lifecycle_rules=None, location=None, logging=None, name=None, project=None, requester_pays=None, retention_policy=None, self_link=None, storage_class=None, url=None, versioning=None, website=None):
"""
Get an existing Bucket resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] bucket_policy_only: Enables [Bucket Policy Only](https://cloud.google.com/storage/docs/bucket-policy-only) access to a bucket.
:param pulumi.Input[list] cors: The bucket's [Cross-Origin Resource Sharing (CORS)](https://www.w3.org/TR/cors/) configuration. Multiple blocks of this type are permitted. Structure is documented below.
:param pulumi.Input[dict] encryption: The bucket's encryption configuration.
:param pulumi.Input[bool] force_destroy: When deleting a bucket, this
boolean option will delete all contained objects. If you try to delete a
bucket that contains objects, this provider will fail that run.
:param pulumi.Input[dict] labels: A set of key/value label pairs to assign to the bucket.
:param pulumi.Input[list] lifecycle_rules: The bucket's [Lifecycle Rules](https://cloud.google.com/storage/docs/lifecycle#configuration) configuration. Multiple blocks of this type are permitted. Structure is documented below.
:param pulumi.Input[str] location: The [GCS location](https://cloud.google.com/storage/docs/bucket-locations)
:param pulumi.Input[dict] logging: The bucket's [Access & Storage Logs](https://cloud.google.com/storage/docs/access-logs) configuration.
:param pulumi.Input[str] name: The name of the bucket.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[bool] requester_pays: Enables [Requester Pays](https://cloud.google.com/storage/docs/requester-pays) on a storage bucket.
:param pulumi.Input[dict] retention_policy: Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Structure is documented below.
:param pulumi.Input[str] self_link: The URI of the created resource.
:param pulumi.Input[str] storage_class: The [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of the new bucket. Supported values include: `STANDARD`, `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`.
:param pulumi.Input[str] url: The base URL of the bucket, in the format `gs://<bucket-name>`.
:param pulumi.Input[dict] versioning: The bucket's [Versioning](https://cloud.google.com/storage/docs/object-versioning) configuration.
:param pulumi.Input[dict] website: Configuration if the bucket acts as a website. Structure is documented below.
The **cors** object supports the following:
* `maxAgeSeconds` (`pulumi.Input[float]`)
* `methods` (`pulumi.Input[list]`)
* `origins` (`pulumi.Input[list]`)
* `responseHeaders` (`pulumi.Input[list]`)
The **encryption** object supports the following:
* `defaultKmsKeyName` (`pulumi.Input[str]`)
The **lifecycle_rules** object supports the following:
* `action` (`pulumi.Input[dict]`)
* `storage_class` (`pulumi.Input[str]`) - The [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of the new bucket. Supported values include: `STANDARD`, `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`.
* `type` (`pulumi.Input[str]`)
* `condition` (`pulumi.Input[dict]`)
* `age` (`pulumi.Input[float]`)
* `createdBefore` (`pulumi.Input[str]`)
* `matchesStorageClasses` (`pulumi.Input[list]`)
* `numNewerVersions` (`pulumi.Input[float]`)
* `withState` (`pulumi.Input[str]`)
The **logging** object supports the following:
* `logBucket` (`pulumi.Input[str]`)
* `logObjectPrefix` (`pulumi.Input[str]`)
The **retention_policy** object supports the following:
* `isLocked` (`pulumi.Input[bool]`)
* `retentionPeriod` (`pulumi.Input[float]`)
The **versioning** object supports the following:
* `enabled` (`pulumi.Input[bool]`)
The **website** object supports the following:
* `mainPageSuffix` (`pulumi.Input[str]`)
* `notFoundPage` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/storage_bucket.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["bucket_policy_only"] = bucket_policy_only
__props__["cors"] = cors
__props__["default_event_based_hold"] = default_event_based_hold
__props__["encryption"] = encryption
__props__["force_destroy"] = force_destroy
__props__["labels"] = labels
__props__["lifecycle_rules"] = lifecycle_rules
__props__["location"] = location
__props__["logging"] = logging
__props__["name"] = name
__props__["project"] = project
__props__["requester_pays"] = requester_pays
__props__["retention_policy"] = retention_policy
__props__["self_link"] = self_link
__props__["storage_class"] = storage_class
__props__["url"] = url
__props__["versioning"] = versioning
__props__["website"] = website
return Bucket(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
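A small sketch using the `Bucket` resource above with the dict-shaped inputs it documents (`versioning`, `lifecycle_rules`); the names and the 30-day age are placeholders.
```python
import pulumi
import pulumi_gcp as gcp

# A versioned bucket that deletes objects older than 30 days.
bucket = gcp.storage.Bucket(
    "example-bucket",
    location="US",
    force_destroy=True,
    versioning={"enabled": True},
    lifecycle_rules=[{
        "action": {"type": "Delete"},
        "condition": {"age": 30},
    }],
)

pulumi.export("bucket_url", bucket.url)
```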
#### File: pulumi_gcp/storage/default_object_access_control.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class DefaultObjectAccessControl(pulumi.CustomResource):
bucket: pulumi.Output[str]
domain: pulumi.Output[str]
email: pulumi.Output[str]
entity: pulumi.Output[str]
entity_id: pulumi.Output[str]
generation: pulumi.Output[float]
object: pulumi.Output[str]
project_team: pulumi.Output[dict]
role: pulumi.Output[str]
def __init__(__self__, resource_name, opts=None, bucket=None, entity=None, object=None, role=None, __props__=None, __name__=None, __opts__=None):
"""
The DefaultObjectAccessControls resources represent the Access Control
Lists (ACLs) applied to a new object within a Google Cloud Storage bucket
when no ACL was provided for that object. ACLs let you specify who has
access to your bucket contents and to what extent.
There are two roles that can be assigned to an entity:
READERs can get an object, though the acl property will not be revealed.
OWNERs are READERs, and they can get the acl property, update an object,
and call all objectAccessControls methods on the object. The owner of an
object is always an OWNER.
For more information, see Access Control, with the caveat that this API
uses READER and OWNER instead of READ and FULL_CONTROL.
To get more information about DefaultObjectAccessControl, see:
* [API documentation](https://cloud.google.com/storage/docs/json_api/v1/defaultObjectAccessControls)
* How-to Guides
* [Official Documentation](https://cloud.google.com/storage/docs/access-control/create-manage-lists)
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/storage_default_object_access_control.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bucket is None:
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
if entity is None:
raise TypeError("Missing required property 'entity'")
__props__['entity'] = entity
__props__['object'] = object
if role is None:
raise TypeError("Missing required property 'role'")
__props__['role'] = role
__props__['domain'] = None
__props__['email'] = None
__props__['entity_id'] = None
__props__['generation'] = None
__props__['project_team'] = None
super(DefaultObjectAccessControl, __self__).__init__(
'gcp:storage/defaultObjectAccessControl:DefaultObjectAccessControl',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, bucket=None, domain=None, email=None, entity=None, entity_id=None, generation=None, object=None, project_team=None, role=None):
"""
Get an existing DefaultObjectAccessControl resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
The **project_team** object supports the following:
* `projectNumber` (`pulumi.Input[str]`)
* `team` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/storage_default_object_access_control.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["bucket"] = bucket
__props__["domain"] = domain
__props__["email"] = email
__props__["entity"] = entity
__props__["entity_id"] = entity_id
__props__["generation"] = generation
__props__["object"] = object
__props__["project_team"] = project_team
__props__["role"] = role
return DefaultObjectAccessControl(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
``` |
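A brief sketch pairing the resource above with a bucket, granting READER on newly created objects to all users; resource names are placeholders.
```python
import pulumi_gcp as gcp

bucket = gcp.storage.Bucket("example-bucket", location="US")

# Objects created without an explicit ACL become publicly readable by default.
default_acl = gcp.storage.DefaultObjectAccessControl(
    "example-default-acl",
    bucket=bucket.name,
    entity="allUsers",
    role="READER",
)
```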
{
"source": "23errg/GamestonkTerminal",
"score": 2
} |
#### File: stocks/technical_analysis/fib.py
```python
import logging
import plotly.graph_objects as go
from bots import imps, load_candle
from openbb_terminal.common.technical_analysis import custom_indicators_model
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def fib_command(
ticker="",
interval: int = 15,
past_days: int = 0,
start: str = "",
end: str = "",
extended_hours: bool = False,
heikin_candles: bool = False,
news: bool = False,
):
"""Displays chart with fibonacci retracement [Yahoo Finance]"""
# Debug
if imps.DEBUG:
# pylint: disable=logging-too-many-args
logger.debug(
"ta fib %s %s %s %s %s %s %s %s",
ticker,
interval,
past_days,
start,
end,
extended_hours,
heikin_candles,
news,
)
past_days = (past_days + 1) if (interval != 1440) or (start != "") else 365
# Retrieve Data
(df_stock, start, end, bar_start,) = load_candle.stock_data(
ticker=ticker,
interval=interval,
past_days=past_days,
extended_hours=extended_hours,
start=start,
end=end,
heikin_candles=heikin_candles,
)
if df_stock.empty:
raise Exception(f"No data found for {ticker.upper()}.")
df_ta = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]
# Output Data
if interval != 1440:
df_ta = df_ta.loc[(df_ta.index >= bar_start) & (df_ta.index < end)]
(
df_fib,
min_date,
max_date,
min_pr,
max_pr,
) = custom_indicators_model.calculate_fib_levels(df_ta, 12, bar_start, None)
levels = df_fib.Price
# Output Data
fibs = [
"<b>0</b>",
"<b>0.235</b>",
"<b>0.382</b>",
"<b>0.5</b>",
"<b>0.618</b>",
"<b>0.65</b>",
"<b>1</b>",
]
plot = load_candle.candle_fig(
df_ta,
ticker,
interval,
extended_hours,
news,
bar=bar_start,
int_bar=interval,
shared_xaxes=True,
vertical_spacing=0.07,
)
title = f"<b>{plot['plt_title']} Fibonacci-Retracement-Levels</b>"
lvl_text: str = "right" if min_date > max_date else "left"
fig = plot["fig"]
fig.add_trace(
go.Scatter(
x=[min_date, max_date],
y=[min_pr, max_pr],
opacity=1,
mode="lines",
line=imps.PLT_FIB_COLORWAY[8],
showlegend=False,
),
row=1,
col=1,
secondary_y=True,
)
for i in range(6):
fig.add_trace(
go.Scatter(
name=fibs[i],
x=[min_date, max_date],
y=[levels[i], levels[i]],
opacity=0.2,
mode="lines",
line_color=imps.PLT_FIB_COLORWAY[i],
showlegend=False,
),
row=1,
col=1,
secondary_y=True,
)
fig.add_trace(
go.Scatter(
name=fibs[i + 1],
x=[min_date, max_date],
y=[levels[i + 1], levels[i + 1]],
opacity=0.2,
mode="lines",
fill="tonexty",
line_color=imps.PLT_FIB_COLORWAY[i + 1],
showlegend=False,
),
row=1,
col=1,
secondary_y=True,
)
for i in range(7):
fig.add_trace(
go.Scatter(
name=fibs[i],
x=[min_date],
y=[levels[i]],
opacity=0.9,
mode="text",
text=fibs[i],
textposition=f"middle {lvl_text}" if i != 5 else f"bottom {lvl_text}",
textfont=dict(imps.PLT_FIB_COLORWAY[7], color=imps.PLT_FIB_COLORWAY[i]),
showlegend=False,
),
row=1,
col=1,
secondary_y=True,
)
fig.update_layout(
margin=dict(l=0, r=0, t=50, b=20),
template=imps.PLT_TA_STYLE_TEMPLATE,
title=title,
title_x=0.02,
title_font_size=14,
dragmode="pan",
)
imagefile = "ta_fib.png"
# Check if interactive settings are enabled
plt_link = ""
if imps.INTERACTIVE:
plt_link = imps.inter_chart(fig, imagefile, callback=False)
fig.update_layout(
width=800,
height=500,
)
imagefile = imps.image_border(imagefile, fig=fig)
return {
"title": f"Stocks: Fibonacci-Retracement-Levels {ticker.upper()}",
"description": plt_link,
"imagefile": imagefile,
}
```
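A rough call-site sketch for `fib_command` above; the import path and ticker are assumptions, and the surrounding Discord bot wiring (config, API keys) is omitted.
```python
# Assumed module path within the repo; adjust to the actual package layout.
from bots.stocks.technical_analysis.fib import fib_command

# Render the Fibonacci retracement chart payload for a placeholder ticker.
payload = fib_command(ticker="AAPL", interval=15, past_days=10)

# The returned dict carries the embed title, an optional interactive link,
# and the rendered image file the bot attaches to its reply.
print(payload["title"])
print(payload["imagefile"])
```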
#### File: stocks/technical_analysis/ma.py
```python
import logging
import plotly.graph_objects as go
from bots import imps, load_candle
from openbb_terminal.common.technical_analysis import overlap_model
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
# pylint: disable=R0913
@log_start_end(log=logger)
def ma_command(
ticker="",
interval: int = 15,
past_days: int = 0,
ma_mode="ema",
window="",
offset: int = 0,
start="",
end="",
extended_hours: bool = False,
heikin_candles: bool = False,
news: bool = False,
):
"""Displays chart with selected Moving Average [Yahoo Finance]"""
# Debug
if imps.DEBUG:
# pylint: disable=logging-too-many-args
logger.debug(
"ta ma %s %s %s %s %s %s %s %s %s %s %s",
ticker,
interval,
past_days,
ma_mode,
window,
offset,
start,
end,
extended_hours,
heikin_candles,
news,
)
# Check for argument
overlap_ma = {
"ema": overlap_model.ema,
"hma": overlap_model.hma,
"sma": overlap_model.sma,
"wma": overlap_model.wma,
"zlma": overlap_model.zlma,
}
if ticker == "":
raise Exception("Stock ticker is required")
# Retrieve Data
df_stock, start, end, bar_start = load_candle.stock_data(
ticker=ticker,
interval=interval,
past_days=past_days,
extended_hours=extended_hours,
start=start,
end=end,
heikin_candles=heikin_candles,
)
if df_stock.empty:
raise Exception("No Data Found")
if window == "":
window = [20, 50]
else:
window_temp = list()
for wind in window.split(","):
try:
window_temp.append(int(wind))
except Exception as e:
raise Exception("Window needs to be a float") from e
window = window_temp
df_ta = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]
if df_ta.empty:
raise Exception("No Data Found")
for win in window:
ema_data = overlap_ma[ma_mode](
values=df_ta["Adj Close"], length=win, offset=offset
)
df_ta = df_ta.join(ema_data)
# Output Data
if interval != 1440:
df_ta = df_ta.loc[(df_ta.index >= bar_start) & (df_ta.index < end)]
df_ta = df_ta.fillna(0.0)
plot = load_candle.candle_fig(
df_ta,
ticker,
interval,
extended_hours,
news,
bar=bar_start,
int_bar=interval,
)
title = f"<b>{plot['plt_title']} Moving Average ({ma_mode.upper()})</b>"
fig = plot["fig"]
i2 = 6 if interval != 1440 else 11
for i in range(i2, df_ta.shape[1]):
fig.add_trace(
go.Scatter(
name=f"{df_ta.iloc[:, i].name}",
mode="lines",
x=df_ta.index,
y=df_ta.iloc[:, i].values,
opacity=1,
),
secondary_y=True,
row=1,
col=1,
)
fig.update_layout(
margin=dict(l=0, r=0, t=50, b=20),
template=imps.PLT_TA_STYLE_TEMPLATE,
colorway=imps.PLT_TA_COLORWAY,
title=title,
title_x=0.1,
title_font_size=14,
dragmode="pan",
)
imagefile = "ta_ma.png"
# Check if interactive settings are enabled
plt_link = ""
if imps.INTERACTIVE:
plt_link = imps.inter_chart(fig, imagefile, callback=False)
fig.update_layout(
width=800,
height=500,
)
imagefile = imps.image_border(imagefile, fig=fig)
return {
"title": f"Stocks: Moving Average {ma_mode.upper()}",
"description": plt_link,
"imagefile": imagefile,
}
```
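A comparable call-site sketch for `ma_command` above, again with an assumed import path and placeholder ticker; `window` is passed as the comma-separated string the command parses.
```python
# Assumed module path within the repo; adjust to the actual package layout.
from bots.stocks.technical_analysis.ma import ma_command

# 20/50-period EMA overlay on 15-minute candles for a placeholder ticker.
payload = ma_command(ticker="MSFT", interval=15, ma_mode="ema", window="20,50")
print(payload["title"], payload["imagefile"])
```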
#### File: common/quantitative_analysis/rolling_view.py
```python
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
import matplotlib.pyplot as plt
import pandas as pd
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.quantitative_analysis import rolling_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, plot_autoscale, reindex_dates
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_mean_std(
name: str,
df: pd.DataFrame,
target: str,
window: int,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""View rolling spread
Parameters
----------
name : str
Stock ticker
df : pd.DataFrame
Dataframe
target : str
Column in data to look at
window : int
Length of window
export : str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
External axes (2 axes are expected in the list), by default None
"""
data = df[target]
rolling_mean, rolling_std = rolling_model.get_rolling_avg(data, window)
plot_data = pd.merge(
data,
rolling_mean,
how="outer",
left_index=True,
right_index=True,
suffixes=("", "_mean"),
)
plot_data = pd.merge(
plot_data,
rolling_std,
how="outer",
left_index=True,
right_index=True,
suffixes=("", "_std"),
)
plot_data = reindex_dates(plot_data)
# This plot has 2 axes
if external_axes is None:
_, axes = plt.subplots(
2,
1,
sharex=True,
figsize=plot_autoscale(),
dpi=PLOT_DPI,
)
ax1, ax2 = axes
else:
if len(external_axes) != 2:
logger.error("Expected list of two axis items.")
console.print("[red]Expected list of 2 axis items./n[/red]")
return
(ax1, ax2) = external_axes
ax1.plot(
plot_data.index,
plot_data[target].values,
label=name,
)
ax1.plot(
plot_data.index,
plot_data[target + "_mean"].values,
)
ax1.set_ylabel(
"Values",
)
ax1.legend(["Real Values", "Rolling Mean"])
ax1.set_title(f"Rolling mean and std (window {str(window)}) of {name} {target}")
ax1.set_xlim([plot_data.index[0], plot_data.index[-1]])
ax2.plot(
plot_data.index,
plot_data[target + "_std"].values,
label="Rolling std",
)
ax2.legend(["Rolling std"])
ax2.set_ylabel(
f"{target} Std Deviation",
)
theme.style_primary_axis(
ax1,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
theme.style_primary_axis(
ax2,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
"rolling",
rolling_mean.join(rolling_std, lsuffix="_mean", rsuffix="_std"),
)
@log_start_end(log=logger)
def display_spread(
name: str,
df: pd.DataFrame,
target: str,
window: int,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""View rolling spread
Parameters
----------
name : str
Stock ticker
df : pd.DataFrame
Dataframe
target: str
Column in data to look at
window : int
Length of window
export : str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
External axes (3 axes are expected in the list), by default None
"""
data = df[target]
df_sd, df_var = rolling_model.get_spread(data, window)
plot_data = pd.merge(
data,
df_sd,
how="outer",
left_index=True,
right_index=True,
suffixes=("", "_sd"),
)
plot_data = pd.merge(
plot_data,
df_var,
how="outer",
left_index=True,
right_index=True,
suffixes=("", "_var"),
)
plot_data = reindex_dates(plot_data)
# This plot has 3 axes
if external_axes is None:
_, axes = plt.subplots(
3,
1,
sharex=True,
figsize=plot_autoscale(),
dpi=PLOT_DPI,
)
(ax1, ax2, ax3) = axes
else:
if len(external_axes) != 3:
logger.error("Expected list of one axis item.")
console.print("[red]Expected list of 1 axis items./n[/red]")
return
(ax1, ax2, ax3) = external_axes
ax1.plot(plot_data.index, plot_data[target].values)
ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
ax1.set_ylabel("Value")
ax1.set_title(f"Spread of {name} {target}")
ax2.plot(
plot_data[f"STDEV_{window}"].index,
plot_data[f"STDEV_{window}"].values,
label="Stdev",
)
ax2.set_ylabel("Stdev")
ax3.plot(
plot_data[f"VAR_{window}"].index,
plot_data[f"VAR_{window}"].values,
label="Variance",
)
ax3.set_ylabel("Variance")
theme.style_primary_axis(
ax1,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
theme.style_primary_axis(
ax2,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
theme.style_primary_axis(
ax3,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
"spread",
df_sd.join(df_var, lsuffix="_sd", rsuffix="_var"),
)
@log_start_end(log=logger)
def display_quantile(
name: str,
df: pd.DataFrame,
target: str,
window: int,
quantile: float,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""View rolling quantile
Parameters
----------
name : str
Stock ticker
df : pd.DataFrame
Dataframe
target : str
Column in data to look at
window : int
Length of window
quantile : float
Quantile to get
export : str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
data = df[target]
df_med, df_quantile = rolling_model.get_quantile(data, window, quantile)
plot_data = pd.merge(
data,
df_med,
how="outer",
left_index=True,
right_index=True,
suffixes=("", "_med"),
)
plot_data = pd.merge(
plot_data,
df_quantile,
how="outer",
left_index=True,
right_index=True,
suffixes=("", "_quantile"),
)
plot_data = reindex_dates(plot_data)
# This plot has 1 axis
if external_axes is None:
_, ax = plt.subplots(
figsize=plot_autoscale(),
dpi=PLOT_DPI,
)
else:
if len(external_axes) != 1:
logger.error("Expected list of one axis item.")
console.print("[red]Expected list of 1 axis items./n[/red]")
return
(ax,) = external_axes
ax.set_title(f"{name} {target} Median & Quantile")
ax.plot(plot_data.index, plot_data[target].values, label=target)
ax.plot(
plot_data.index,
plot_data[f"MEDIAN_{window}"].values,
label=f"Median w={window}",
)
ax.plot(
plot_data.index,
plot_data[f"QTL_{window}_{quantile}"].values,
label=f"Quantile q={quantile}",
linestyle="--",
)
ax.set_xlim(plot_data.index[0], plot_data.index[-1])
ax.set_ylabel(f"{name} Value")
ax.legend()
theme.style_primary_axis(
ax,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
"quantile",
df_med.join(df_quantile),
)
@log_start_end(log=logger)
def display_skew(
name: str,
df: pd.DataFrame,
target: str,
window: int,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""View rolling skew
Parameters
----------
name : str
Stock ticker
df : pd.DataFrame
Dataframe
target : str
Column in data to look at
window : int
Length of window
export : str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
External axes (2 axes are expected in the list), by default None
"""
data = df[target]
df_skew = rolling_model.get_skew(data, window)
plot_data = pd.merge(
data,
df_skew,
how="outer",
left_index=True,
right_index=True,
)
plot_data = reindex_dates(plot_data)
# This plot has 2 axes
if external_axes is None:
_, axes = plt.subplots(
2,
1,
sharex=True,
figsize=plot_autoscale(),
dpi=PLOT_DPI,
)
(ax1, ax2) = axes
else:
if len(external_axes) != 2:
logger.error("Expected list of two axis items.")
console.print("[red]Expected list of 2 axis items./n[/red]")
return
(ax1, ax2) = external_axes
ax1.set_title(f"{name} Skewness Indicator")
ax1.plot(plot_data.index, plot_data[target].values)
ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
ax1.set_ylabel(f"{target}")
ax2.plot(plot_data.index, plot_data[f"SKEW_{window}"].values, label="Skew")
ax2.set_ylabel("Indicator")
theme.style_primary_axis(
ax1,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
theme.style_primary_axis(
ax2,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
if external_axes is None:
theme.visualize_output()
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
"skew",
df_skew,
)
@log_start_end(log=logger)
def display_kurtosis(
name: str,
df: pd.DataFrame,
target: str,
window: int,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""View rolling kurtosis
Parameters
----------
name : str
Ticker
df : pd.DataFrame
Dataframe of stock prices
target : str
Column in data to look at
window : int
Length of window
export : str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
External axes (2 axes are expected in the list), by default None
"""
data = df[target]
df_kurt = rolling_model.get_kurtosis(data, window)
plot_data = pd.merge(
data,
df_kurt,
how="outer",
left_index=True,
right_index=True,
)
plot_data = reindex_dates(plot_data)
# This plot has 2 axes
if external_axes is None:
_, axes = plt.subplots(
2,
1,
sharex=True,
figsize=plot_autoscale(),
dpi=PLOT_DPI,
)
(ax1, ax2) = axes
else:
if len(external_axes) != 2:
logger.error("Expected list of two axis items.")
console.print("[red]Expected list of 2 axis items./n[/red]")
return
(ax1, ax2) = external_axes
ax1.set_title(f"{name} {target} Kurtosis Indicator (window {str(window)})")
ax1.plot(plot_data.index, plot_data[target].values)
ax1.set_xlim(plot_data.index[0], plot_data.index[-1])
ax1.set_ylabel(f"{target}")
ax2.plot(
plot_data.index,
plot_data[f"KURT_{window}"].values,
)
ax2.set_ylabel("Indicator")
theme.style_primary_axis(
ax1,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
theme.style_primary_axis(
ax2,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
if external_axes is None:
theme.visualize_output()
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
"kurtosis",
df_kurt,
)
```
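A minimal sketch driving `display_mean_std` above with real price data; `yfinance` is an assumed data source for the example, and the import path mirrors the module layout shown in the file header.
```python
import yfinance as yf  # assumed data source for this example

from openbb_terminal.common.quantitative_analysis import rolling_view

# Daily prices for a placeholder ticker, then a 20-day rolling mean/std plot.
df = yf.download("AAPL", period="1y", progress=False)
rolling_view.display_mean_std(name="AAPL", df=df, target="Adj Close", window=20)
```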
#### File: common/technical_analysis/overlap_view.py
```python
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
import mplfinance as mpf
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.common.technical_analysis import overlap_model
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
reindex_dates,
lambda_long_number_format_y_axis,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def view_ma(
series: pd.Series,
length: List[int] = None,
offset: int = 0,
ma_type: str = "EMA",
s_ticker: str = "",
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""Plots MA technical indicator
Parameters
----------
series : pd.Series
Series of prices
length : List[int]
Lengths of MA windows
offset : int
Offset of the moving average
ma_type: str
Type of moving average. One of "EMA", "SMA", "WMA", "HMA" or "ZLMA"
s_ticker : str
Ticker
export : str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
# Define a dataframe for adding EMA series to it
price_df = pd.DataFrame(series)
l_legend = [s_ticker]
if not length:
length = [20, 50]
for win in length:
if ma_type == "EMA":
df_ta = overlap_model.ema(series, win, offset)
l_legend.append(f"EMA {win}")
elif ma_type == "SMA":
df_ta = overlap_model.sma(series, win, offset)
l_legend.append(f"SMA {win}")
elif ma_type == "WMA":
df_ta = overlap_model.wma(series, win, offset)
l_legend.append(f"WMA {win}")
elif ma_type == "HMA":
df_ta = overlap_model.hma(series, win, offset)
l_legend.append(f"HMA {win}")
elif ma_type == "ZLMA":
df_ta = overlap_model.zlma(series, win, offset)
l_legend.append(f"ZLMA {win}")
price_df = price_df.join(df_ta)
plot_data = reindex_dates(price_df)
# This plot has 1 axis
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
if len(external_axes) != 1:
logger.error("Expected list of one axis item.")
console.print("[red]Expected list of one axis item./n[/red]")
return
(ax,) = external_axes
ax.plot(plot_data.index, plot_data.iloc[:, 1].values)
ax.set_xlim([plot_data.index[0], plot_data.index[-1]])
ax.set_ylabel(f"{s_ticker} Price")
for idx in range(2, plot_data.shape[1]):
ax.plot(plot_data.iloc[:, idx])
ax.set_title(f"{s_ticker} {ma_type.upper()}")
ax.legend(l_legend)
theme.style_primary_axis(
ax,
data_index=plot_data.index.to_list(),
tick_labels=plot_data["date"].to_list(),
)
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
f"{ma_type.lower()}{'_'.join([str(win) for win in length])}",
price_df,
)
@log_start_end(log=logger)
def view_vwap(
s_ticker: str,
ohlc: pd.DataFrame,
offset: int = 0,
s_interval: str = "",
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Plots EMA technical indicator
Parameters
----------
s_ticker : str
Ticker
ohlc : pd.DataFrame
Dataframe of prices
offset : int
Offset variable
s_interval : str
Interval of data
export : str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
External axes (3 axes are expected in the list), by default None
"""
ohlc.index = ohlc.index.tz_localize(None)
ohlc["Day"] = [idx.date() for idx in ohlc.index]
day_df = ohlc[ohlc.Day == ohlc.Day[-1]]
df_vwap = overlap_model.vwap(day_df, offset)
candle_chart_kwargs = {
"type": "candle",
"style": theme.mpf_style,
"volume": True,
"xrotation": theme.xticks_rotation,
"scale_padding": {"left": 0.3, "right": 1.2, "top": 0.8, "bottom": 0.8},
"update_width_config": {
"candle_linewidth": 0.6,
"candle_width": 0.8,
"volume_linewidth": 0.8,
"volume_width": 0.8,
},
"warn_too_much_data": 10000,
}
# This plot has 2 axes
if external_axes is None:
candle_chart_kwargs["returnfig"] = True
candle_chart_kwargs["figratio"] = (10, 7)
candle_chart_kwargs["figscale"] = 1.10
candle_chart_kwargs["figsize"] = plot_autoscale()
candle_chart_kwargs["addplot"] = mpf.make_addplot(
df_vwap, width=theme.line_width
)
fig, ax = mpf.plot(day_df, **candle_chart_kwargs)
fig.suptitle(
f"{s_ticker} {s_interval} VWAP",
x=0.055,
y=0.965,
horizontalalignment="left",
)
lambda_long_number_format_y_axis(day_df, "Volume", ax)
theme.visualize_output(force_tight_layout=False)
else:
if len(external_axes) != 3:
logger.error("Expected list of three axis items.")
console.print("[red]Expected list of 3 axis items./n[/red]")
return
(ax1, ax2, ax3) = external_axes
candle_chart_kwargs["ax"] = ax1
candle_chart_kwargs["volume"] = ax2
candle_chart_kwargs["addplot"] = mpf.make_addplot(
df_vwap, width=theme.line_width, ax=ax3
)
mpf.plot(day_df, **candle_chart_kwargs)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)).replace("common", "stocks"),
"VWAP",
df_vwap,
)
```
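A similar sketch for `view_ma` above, plotting two simple moving averages over an adjusted-close series; `yfinance` is again an assumed data source.
```python
import yfinance as yf  # assumed data source for this example

from openbb_terminal.common.technical_analysis import overlap_view

df = yf.download("AAPL", period="6mo", progress=False)

# 20- and 50-day SMAs overlaid on the adjusted close.
overlap_view.view_ma(
    series=df["Adj Close"],
    length=[20, 50],
    ma_type="SMA",
    s_ticker="AAPL",
)
```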
#### File: cryptocurrency/due_diligence/binance_model.py
```python
__docformat__ = "numpy"
import argparse
import logging
from collections import defaultdict
from typing import List, Tuple, Union
import pandas as pd
from binance.client import Client
from binance.exceptions import BinanceAPIException
import openbb_terminal.config_terminal as cfg
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def _get_trading_pairs() -> List[dict]:
"""Helper method that return all trading pairs on binance. Other methods are use this data as an input for e.g
building dataframe with all coins, or to build dict of all trading pairs. [Source: Binance]
Returns
-------
List[dict]
list of dictionaries in format:
[
{'symbol': 'ETHBTC', 'status': 'TRADING', 'baseAsset': 'ETH', 'baseAssetPrecision': 8,
'quoteAsset': 'BTC', 'quotePrecision': 8, 'quoteAssetPrecision': 8, 'baseCommissionPrecision': 8,
'quoteCommissionPrecision': 8,
'orderTypes': ['LIMIT', 'LIMIT_MAKER', 'MARKET', 'STOP_LOSS_LIMIT', 'TAKE_PROFIT_LIMIT'],
'icebergAllowed': True,
'ocoAllowed': True,
'quoteOrderQtyMarketAllowed': True,
'isSpotTradingAllowed': True,
'isMarginTradingAllowed': True,
'filters': [{'filterType': 'PRICE_FILTER', 'minPrice': '0.00000100',
'maxPrice': '922327.00000000', 'tickSize': '0.00000100'},
{'filterType': 'PERCENT_PRICE', 'multiplierUp': '5', 'multiplierDown': '0.2', 'avgPriceMins': 5},
{'filterType': 'LOT_SIZE', 'minQty': '0.00100000', 'maxQty': '100000.00000000', 'stepSize': '0.00100000'},
{'filterType': 'MIN_NOTIONAL', 'minNotional': '0.00010000', 'applyToMarket': True, 'avgPriceMins': 5},
{'filterType': 'ICEBERG_PARTS', 'limit': 10}, {'filterType': 'MARKET_LOT_SIZE', 'minQty': '0.00000000',
'maxQty': '930.49505347', 'stepSize': '0.00000000'}, {'filterType': 'MAX_NUM_ORDERS', 'maxNumOrders': 200},
{'filterType': 'MAX_NUM_ALGO_ORDERS', 'maxNumAlgoOrders': 5}], 'permissions': ['SPOT', 'MARGIN']},
...
]
"""
client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
ex_info = client.get_exchange_info()["symbols"]
trading_pairs = [p for p in ex_info if p["status"] == "TRADING"]
return trading_pairs
@log_start_end(log=logger)
def get_all_binance_trading_pairs() -> pd.DataFrame:
"""Returns all available pairs on Binance in DataFrame format. DataFrame has 3 columns symbol, baseAsset, quoteAsset
example row: ETHBTC | ETH | BTC
[Source: Binance]
Returns
-------
pd.DataFrame
All available pairs on Binance
Columns: symbol, baseAsset, quoteAsset
"""
trading_pairs = _get_trading_pairs()
return pd.DataFrame(trading_pairs)[["symbol", "baseAsset", "quoteAsset"]]
@log_start_end(log=logger)
def get_binance_available_quotes_for_each_coin() -> dict:
"""Helper methods that for every coin available on Binance add all quote assets. [Source: Binance]
Returns
-------
dict:
All quote assets for given coin
{'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...]
"""
trading_pairs = _get_trading_pairs()
results = defaultdict(list)
for pair in trading_pairs:
results[pair["baseAsset"]].append(pair["quoteAsset"])
return results
@log_start_end(log=logger)
def check_valid_binance_str(symbol: str) -> str:
"""Check if symbol is in defined binance. [Source: Binance]"""
client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
try:
client.get_avg_price(symbol=symbol.upper())
return symbol.upper()
except BinanceAPIException as e:
logger.exception("%s is not a valid binance symbol", str(symbol))
raise argparse.ArgumentTypeError(
f"{symbol} is not a valid binance symbol"
) from e
@log_start_end(log=logger)
def show_available_pairs_for_given_symbol(
symbol: str = "ETH",
) -> Tuple[Union[str, None], list]:
"""Return all available quoted assets for given symbol. [Source: Binance]
Parameters
----------
symbol:
Uppercase symbol of coin e.g BTC, ETH, UNI, LUNA, DOT ...
Returns
-------
str:
Coin symbol
list:
Quoted assets for given symbol: ["BTC", "USDT" , "BUSD"]
"""
symbol_upper = symbol.upper()
pairs = get_binance_available_quotes_for_each_coin()
for k, v in pairs.items():
if k == symbol_upper:
return k, v
console.print(f"Couldn't find anything for symbol {symbol_upper}\n")
return None, []
```
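A quick sketch of the lookup helper above; it assumes Binance API credentials are present in the terminal config, since the client is built from `cfg.API_BINANCE_KEY`/`API_BINANCE_SECRET`.
```python
from openbb_terminal.cryptocurrency.due_diligence import binance_model

# All quote assets that ETH trades against on Binance, e.g. ("ETH", ["BTC", "USDT", ...]).
coin, quotes = binance_model.show_available_pairs_for_given_symbol("ETH")
print(coin, quotes)
```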
#### File: cryptocurrency/overview/cryptopanic_model.py
```python
__docformat__ = "numpy"
import logging
import math
import textwrap
import time
from typing import Any, Optional
import pandas as pd
import requests
import openbb_terminal.config_terminal as cfg
from openbb_terminal.rich_config import console
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
CATEGORIES = ["news", "media"]
SORT_FILTERS = [
"published_at",
"domain",
"title",
"negative_votes",
"positive_votes",
]
FILTERS = [
"rising",
"hot",
"bullish",
"bearish",
"important",
"saved",
"lol",
]
REGIONS = ["en", "de", "es", "fr", "nl", "it", "pt", "ru"]
class ApiKeyException(Exception):
"""Api Key Exception object"""
@log_start_end(log=logger)
def __init__(self, message: str):
super().__init__(message)
self.message = message
@log_start_end(log=logger)
def __str__(self) -> str:
return f"ApiKeyException: {self.message}"
@log_start_end(log=logger)
def make_request(**kwargs: Any) -> Optional[dict]:
"""Helper methods for requests [Source: https://cryptopanic.com/developers/api/]
Parameters
----------
kwargs: Any
Keyword arguments with parameters for GET request to cryptopanic api.
Returns
-------
dict:
response from api request
"""
api_key = cfg.API_CRYPTO_PANIC_KEY
crypto_panic_url = "https://cryptopanic.com/api/v1"
post_kind = kwargs.get("post_kind")
filter_ = kwargs.get("filter_")
region = kwargs.get("region")
if post_kind not in ["news", "media", "all"]:
post_kind = "all"
url = f"{crypto_panic_url}/posts/?auth_token={api_key}" + f"&kind={post_kind}"
if filter_ and filter_ in [
"rising",
"hot",
"bullish",
"bearish",
"important",
"saved",
"lol",
]:
url += f"&filter={filter_}"
if region and region in REGIONS:
url += f"®ions={region}"
response = requests.get(url)
result = None
if response.status_code == 200:
result = response.json()
else:
if "Token not found" in response.json()["info"]:
console.print("[red]Invalid API Key[/red]\n")
else:
console.print(response.json()["info"])
logger.warning("Invalid authentication: %s", response.text)
return result
@log_start_end(log=logger)
def _parse_post(post: dict) -> dict:
"""Helper method - parse news response object to dictionary with target structure.
[Source: https://cryptopanic.com/]
Parameters
----------
post: dict
Response object from cryptopanic api.
Returns
-------
dict
Parsed dictionary with target data structure.
"""
return {
"published_at": post.get("published_at"),
"domain": post.get("domain"),
"title": post.get("title"),
"negative_votes": post["votes"].get("negative"),
"positive_votes": post["votes"].get("positive"),
"link": post["url"],
}
@log_start_end(log=logger)
def get_news(
limit: int = 60,
post_kind: str = "news",
filter_: Optional[str] = None,
region: str = "en",
) -> pd.DataFrame:
"""Get recent posts from CryptoPanic news aggregator platform. [Source: https://cryptopanic.com/]
Parameters
----------
limit: int
number of news to fetch
post_kind: str
Filter by category of news. Available values: news or media.
filter_: Optional[str]
Filter by kind of news. One from list: rising|hot|bullish|bearish|important|saved|lol
region: str
Filter news by regions. Available regions are: en (English), de (Deutsch), nl (Dutch), es (Español),
fr (Français), it (Italiano), pt (Português), ru (Русский)
Returns
-------
pd.DataFrame
DataFrame with recent news from different sources filtered by provided parameters.
"""
if post_kind not in CATEGORIES:
post_kind = "news"
results = []
response = make_request(post_kind=post_kind, filter_=filter_, region=region)
if response:
data, next_page, _ = (
response["results"],
response.get("next"),
response.get("count"),
)
for post in data:
results.append(_parse_post(post))
        number_of_pages = math.ceil(limit / 20)
counter = 0
while counter < number_of_pages and next_page:
counter += 1
try:
time.sleep(0.2)
res = requests.get(next_page).json()
for post in res["results"]:
results.append(_parse_post(post))
next_page = res.get("next")
except Exception as e: # noqa: F841
logger.exception(str(e))
console.print(
"[red]Something went wrong while fetching news from API[/red]\n"
)
return pd.DataFrame()
try:
df = pd.DataFrame(results)
df["title"] = df["title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=66))
if isinstance(x, str)
else x
)
return df
except Exception as e: # noqa: F841
logger.exception(str(e))
console.print("[red]Something went wrong with DataFrame creation[/red]\n")
return pd.DataFrame()
return pd.DataFrame()
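# A minimal usage sketch (illustrative only; assumes a valid CryptoPanic API key is
# configured in config_terminal): fetch recent bullish English-language news and keep
# the three most positively voted posts. The helper name is hypothetical.
def _example_bullish_news(limit: int = 20) -> pd.DataFrame:
    df = get_news(limit=limit, post_kind="news", filter_="bullish", region="en")
    if df.empty:
        return df
    return df.sort_values("positive_votes", ascending=False).head(3)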
```
#### File: etf/screener/screener_controller.py
```python
__docformat__ = "numpy"
# pylint:disable=R0904,C0201
import argparse
import configparser
import logging
import os
from typing import List
from prompt_toolkit.completion import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.etf import financedatabase_model, financedatabase_view
from openbb_terminal.etf.screener import screener_view
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
parse_known_args_and_warn,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
presets_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "presets/")
class ScreenerController(BaseController):
"""Screener Controller class"""
CHOICES_COMMANDS = [
"view",
"set",
"screen",
"sbc",
]
presets_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "presets/")
preset_choices = [
f.split(".")[0] for f in os.listdir(presets_path) if f.endswith(".ini")
]
sortby_screen_choices = [
"Assets",
"NAV",
"Expense",
"PE",
"SharesOut",
"Div",
"DivYield",
"Volume",
"Open",
"PrevClose",
"YrLow",
"YrHigh",
"Beta",
"N_Hold",
]
PATH = "/etf/scr/"
def __init__(self, queue: List[str] = None):
"""Constructor"""
super().__init__(queue)
self.preset = "etf_config"
self.screen_tickers: List = list()
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["view"] = {c: None for c in self.preset_choices}
choices["set"] = {c: None for c in self.preset_choices}
choices["sbc"] = {
c: None for c in financedatabase_model.get_etfs_categories()
}
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
help_text = f"""[cmds]
view view available presets
set set one of the available presets[/cmds]
[param]PRESET: [/param]{self.preset}[cmds]
screen screen ETF using preset selected [src][StockAnalysis][/src]
sbc screen by category [src][FinanceDatabase][/src][/cmds]
"""
console.print(text=help_text, menu="ETF - Screener")
@log_start_end(log=logger)
def call_view(self, other_args: List[str]):
"""Process view command"""
parser = argparse.ArgumentParser(
add_help=False,
prog="view",
description="""View available presets under presets folder.""",
)
parser.add_argument(
"-p",
"--preset",
action="store",
dest="preset",
type=str,
help="View specific custom preset",
default="",
choices=self.preset_choices,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-p")
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
if ns_parser.preset:
preset_filter = configparser.RawConfigParser()
preset_filter.optionxform = str # type: ignore
preset_filter.read(presets_path + ns_parser.preset + ".ini")
headers = [
"Price",
"Assets",
"NAV",
"Expense",
"PE",
"DivYield",
"Volume",
"Beta",
"N_Hold",
"Open",
"PrevClose",
"YrLow",
"YrHigh",
]
console.print("")
for filter_header in headers:
console.print(f" - {filter_header} -")
d_filters = {**preset_filter[filter_header]}
d_filters = {k: v for k, v in d_filters.items() if v}
if d_filters:
max_len = len(max(d_filters, key=len))
for key, value in d_filters.items():
console.print(f"{key}{(max_len-len(key))*' '}: {value}")
console.print("")
else:
console.print("\nPresets:")
for preset in self.preset_choices:
with open(
presets_path + preset + ".ini",
encoding="utf8",
) as f:
description = ""
for line in f:
if line.strip() == "[Price]":
break
description += line.strip()
console.print(
f" {preset}{(30-len(preset)) * ' '}{description.split('Description: ')[1].replace('#', '')}"
)
console.print("")
@log_start_end(log=logger)
def call_set(self, other_args: List[str]):
"""Process set command"""
parser = argparse.ArgumentParser(
add_help=False,
prog="set",
description="""Set preset.""",
)
parser.add_argument(
"-p",
"--preset",
action="store",
dest="preset",
type=str,
default="template",
help="Filter presets",
choices=self.preset_choices,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-p")
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
self.preset = ns_parser.preset
console.print("")
@log_start_end(log=logger)
def call_screen(self, other_args):
"""Process screen command"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="screen",
add_help=False,
description="Screens ETFS from a personal scraping github repository. Data scraped from stockanalysis.com",
)
parser.add_argument(
"-l",
"--limit",
type=int,
help="Limit of etfs to display",
dest="limit",
default=10,
)
parser.add_argument(
"-s",
"--sort",
dest="sortby",
type=str,
help="Sort by given column. Default: Assets",
default="Assets",
choices=self.sortby_screen_choices,
)
parser.add_argument(
"-a",
"--ascend",
action="store_true",
help="Flag to sort in ascending order (lowest on top)",
dest="ascend",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-l")
ns_parser = parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
screener_view.view_screener(
preset=self.preset,
num_to_show=ns_parser.limit,
sortby=ns_parser.sortby,
ascend=ns_parser.ascend,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_sbc(self, other_args: List[str]):
"""Process sbc command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="sbc",
description="Search by category [Source: FinanceDatabase/StockAnalysis.com]",
)
parser.add_argument(
"-c",
"--category",
type=str,
dest="category",
nargs="+",
help="Category to look for",
required="-h" not in other_args,
)
parser.add_argument(
"-l",
"--limit",
type=check_positive,
dest="limit",
help="Limit of ETFs to display",
default=5,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
category = " ".join(ns_parser.category)
if category in financedatabase_model.get_etfs_categories():
financedatabase_view.display_etf_by_category(
category=category,
limit=ns_parser.limit,
export=ns_parser.export,
)
else:
console.print(
"The category selected does not exist, choose one from:"
f" {', '.join(financedatabase_model.get_etfs_categories())}\n"
)
```
#### File: GamestonkTerminal/openbb_terminal/feature_flags.py
```python
import os
import json
from distutils.util import strtobool
from typing import Union, Optional
from dotenv import load_dotenv
OPENBB_defaults_path = os.path.join(os.path.dirname(__file__), "OPENBB_DEFAULTS.json")
if os.path.exists(OPENBB_defaults_path):
with open(OPENBB_defaults_path) as f:
OPENBB_DEFAULTS = json.load(f)
else:
OPENBB_DEFAULTS = dict()
env_files = [f for f in os.listdir() if f.endswith(".env")]
if env_files:
load_dotenv(env_files[0])
def assign_feature_flag(
feature_flag: str, fallback_value: str, return_bool: bool = False
) -> Optional[Union[str, bool]]:
"""Get the feature flag value in order of priority.
- Env variables have the highest priority
- The OPENBB_DEFAULTS dictionary has the defaults for the bundled app
- Fallback value is used if none of the above mentioned places have feature flag settings
Parameters
----------
feature_flag : str
Feature flag as upper-case string
fallback_value : str
        Fallback value
return_bool : bool, optional
If a boolean should be returned vs a string, by default False
Returns
-------
Optional[Union[str, bool]]
The feature flag value or None
"""
if bool(os.getenv(feature_flag)):
feature_flag_value = (
strtobool(os.getenv(feature_flag)) # type: ignore
if return_bool
else os.getenv(feature_flag)
)
elif feature_flag in OPENBB_DEFAULTS:
feature_flag_value = (
strtobool(OPENBB_DEFAULTS[feature_flag])
if return_bool
else OPENBB_DEFAULTS[feature_flag]
)
else:
feature_flag_value = (
strtobool(fallback_value) if return_bool else fallback_value
)
return feature_flag_value
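# A minimal illustration of the priority order described above (the flag name below is
# hypothetical and assumed absent from OPENBB_DEFAULTS): an environment variable wins
# over the fallback; otherwise the fallback value is used.
def _example_flag_priority():
    os.environ["OPENBB_EXAMPLE_FLAG"] = "False"
    overridden = assign_feature_flag("OPENBB_EXAMPLE_FLAG", "True", True)  # env var wins -> 0
    del os.environ["OPENBB_EXAMPLE_FLAG"]
    fallback = assign_feature_flag("OPENBB_EXAMPLE_FLAG", "True", True)  # fallback used -> 1
    return overridden, fallback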
# Use tabulate to print dataframes
USE_TABULATE_DF = assign_feature_flag("OPENBB_USE_TABULATE_DF", "True", True)
# Use clear console after each command
USE_CLEAR_AFTER_CMD = assign_feature_flag("OPENBB_USE_CLEAR_AFTER_CMD", "False", True)
# Use coloring features
USE_COLOR = assign_feature_flag("OPENBB_USE_COLOR", "True", True)
# Select console flair (choose from config_terminal.py list)
USE_FLAIR = assign_feature_flag("OPENBB_USE_FLAIR", ":openbb")
# Add date and time to command line
USE_DATETIME = assign_feature_flag("OPENBB_USE_DATETIME", "True", True)
# Enable interactive matplotlib mode
USE_ION = assign_feature_flag("OPENBB_USE_ION", "True", True)
# Enable watermark in the figures
USE_WATERMARK = assign_feature_flag("OPENBB_USE_WATERMARK", "True", True)
# Enable command and source in the figures
USE_CMD_LOCATION_FIGURE = assign_feature_flag(
"OPENBB_USE_CMD_LOCATION_FIGURE", "True", True
)
# Enable Prompt Toolkit
USE_PROMPT_TOOLKIT = assign_feature_flag("OPENBB_USE_PROMPT_TOOLKIT", "True", True)
# Enable Prediction features
ENABLE_PREDICT = assign_feature_flag("OPENBB_ENABLE_PREDICT", "True", True)
# Enable plot autoscaling
USE_PLOT_AUTOSCALING = assign_feature_flag("OPENBB_USE_PLOT_AUTOSCALING", "False", True)
# Enable thoughts of the day
ENABLE_THOUGHTS_DAY = assign_feature_flag("OPENBB_ENABLE_THOUGHTS_DAY", "False", True)
# Quick exit for testing
ENABLE_QUICK_EXIT = assign_feature_flag("OPENBB_ENABLE_QUICK_EXIT", "False", True)
# Open report as HTML, otherwise notebook
OPEN_REPORT_AS_HTML = assign_feature_flag("OPENBB_OPEN_REPORT_AS_HTML", "True", True)
# Enable auto print_help when exiting menus
ENABLE_EXIT_AUTO_HELP = assign_feature_flag(
"OPENBB_ENABLE_EXIT_AUTO_HELP", "False", True
)
# Remember contexts during session
REMEMBER_CONTEXTS = assign_feature_flag("OPENBB_REMEMBER_CONTEXTS", "True", True)
# Use the colorful rich terminal
ENABLE_RICH = assign_feature_flag("OPENBB_ENABLE_RICH", "True", True)
# Use the colorful rich terminal
ENABLE_RICH_PANEL = assign_feature_flag("OPENBB_ENABLE_RICH_PANEL", "True", True)
# Check API KEYS before running a command
ENABLE_CHECK_API = assign_feature_flag("OPENBB_ENABLE_CHECK_API", "True", True)
# Send logs to data lake
LOG_COLLECTION = bool(assign_feature_flag("OPENBB_LOG_COLLECTION", "True", True))
# Provide export folder path. If empty that means default.
EXPORT_FOLDER_PATH = assign_feature_flag("OPENBB_EXPORT_FOLDER_PATH", "")
# Set a flag if the application is running from a packaged bundle
PACKAGED_APPLICATION = assign_feature_flag("OPENBB_PACKAGED_APPLICATION", "False", True)
LOGGING_COMMIT_HASH = str(
assign_feature_flag("OPENBB_LOGGING_COMMIT_HASH", "REPLACE_ME")
)
```
#### File: openbb_terminal/jupyter/widget_helpers.py
```python
import os
from typing import List
from jinja2 import Template
def price_card_stylesheet():
"""Load a default CSS stylesheet from file."""
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "style.css")
) as f:
style = f.read()
return style
def price_card(ticker: str, price: str, price_color: str = "neutral_color") -> str:
"""Prepare a styled HTML element of a 128 by 128 price card.
Parameters
----------
ticker : str
Instrument ticker for the price card
price : str
Instrument price as a string
price_color : str, optional
The color of the price. Accepts "up_color", "down_color" and default "neutral_color"
Returns
-------
str
HTML code as string
"""
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "card.j2")
) as f:
template = Template(f.read())
card = template.render(ticker=ticker, price=price, price_color=price_color)
return card
def html_report_stylesheet():
"""Load a default CSS stylesheet from file."""
with open(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "widgets", "report.css"
)
) as f:
style = f.read()
return style
def html_report(title: str = "", stylesheet: str = "", body: str = "") -> str:
"""Prepare a styled HTML page element.
Parameters
----------
title : str, optional
Contents of the title tag, by default ""
stylesheet : str, optional
Contents of the stylesheet tag, by default ""
body : str, optional
Contents of the body tag, by default ""
Returns
-------
str
HTML code as string
"""
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "report.j2")
) as f:
template = Template(f.read())
card = template.render(title=title, stylesheet=stylesheet, body=body)
return card
def h(level: int, text: str) -> str:
"""Wrap text into an HTML `h` tag."""
return f"<h{str(level)}>{text}</h{level}>"
def p(text: str) -> str:
"""Wrap text into an HTML `p` tag."""
return f"<p>{text}</p>"
def row(elements: List) -> str:
    """Wrap a list of HTML elements into a row using the row.j2 template."""
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "row.j2")
) as f:
template = Template(f.read())
return template.render(elements=elements)
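# A minimal composition sketch (illustrative only; tickers and prices are placeholder
# values): render a full HTML report that embeds the default report stylesheet and a
# single row of price cards built from the helpers above.
def _example_report() -> str:
    cards = [
        price_card("AAPL", "150.00", "up_color"),
        price_card("TSLA", "900.00", "down_color"),
    ]
    body = h(1, "Watchlist") + row(cards)
    return html_report(title="Demo report", stylesheet=html_report_stylesheet(), body=body)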
```
#### File: stocks/behavioural_analysis/finnhub_view.py
```python
__docformat__ = "numpy"
import logging
import os
from datetime import datetime, timedelta
from typing import Optional, List
import numpy as np
import pandas as pd
import yfinance as yf
from matplotlib import pyplot as plt
from openbb_terminal.stocks.behavioural_analysis import finnhub_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
)
from openbb_terminal.rich_config import console
from openbb_terminal.decorators import check_api_key
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_FINNHUB_KEY"])
def display_stock_price_headlines_sentiment(
ticker: str,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display stock price and headlines sentiment using VADER model over time. [Source: Finnhub]
Parameters
----------
ticker : str
Ticker of company
export: str
Format to export data
external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
"""
start = datetime.now() - timedelta(days=30)
articles = finnhub_model.get_company_news(
ticker.upper(),
s_start=start.strftime("%Y-%m-%d"),
s_end=datetime.now().strftime("%Y-%m-%d"),
)
sentiment = finnhub_model.process_news_headlines_sentiment(articles)
if not sentiment.empty:
sentiment_data = [item for sublist in sentiment.values for item in sublist]
df_stock = yf.download(
ticker,
start=min(sentiment.index).to_pydatetime().date(),
interval="15m",
prepost=True,
progress=False,
)
if not df_stock.empty:
            # This plot has 2 axes
if external_axes is None:
_, ax = plt.subplots(
figsize=plot_autoscale(),
dpi=PLOT_DPI,
nrows=2,
ncols=1,
sharex=True,
gridspec_kw={"height_ratios": [2, 1]},
)
else:
                if len(external_axes) != 2:
                    logger.error("Expected list of two axis items.")
                    console.print("[red]Expected list of two axis items.\n[/red]")
                    return
                ax = external_axes
ax[0].set_title(f"Headlines sentiment and {ticker} price")
for uniquedate in np.unique(df_stock.index.date):
ax[0].plot(
df_stock[df_stock.index.date == uniquedate].index,
df_stock[df_stock.index.date == uniquedate]["Adj Close"].values,
c="#FCED00",
)
ax[0].set_ylabel("Stock Price")
theme.style_primary_axis(ax[0])
theme.style_primary_axis(ax[1])
ax[1].plot(
sentiment.index,
pd.Series(sentiment_data)
.rolling(window=5, min_periods=1)
.mean()
.values,
c="#FCED00",
)
ax[1].bar(
sentiment[sentiment.values >= 0].index,
[
item
for sublist in sentiment[sentiment.values >= 0].values
for item in sublist
],
color=theme.up_color,
width=0.01,
)
ax[1].bar(
sentiment[sentiment.values < 0].index,
[
item
for sublist in sentiment[sentiment.values < 0].values
for item in sublist
],
color=theme.down_color,
width=0.01,
)
ax[1].yaxis.set_label_position("right")
ax[1].set_ylabel("Headline Sentiment")
if external_axes is None:
theme.visualize_output()
export_data(
export, os.path.dirname(os.path.abspath(__file__)), "snews", sentiment
)
```
#### File: stocks/comparison_analysis/finviz_compare_model.py
```python
__docformat__ = "numpy"
import logging
from typing import List, Tuple
import pandas as pd
from finvizfinance.screener import (
financial,
overview,
ownership,
performance,
technical,
valuation,
)
from finvizfinance.screener.overview import Overview
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_similar_companies(
ticker: str, compare_list: List[str]
) -> Tuple[List[str], str]:
"""Get similar companies from Finviz
Parameters
----------
ticker : str
Ticker to find comparisons for
compare_list : List[str]
List of fields to compare, ["Sector", "Industry", "Country"]
Returns
-------
List[str]
List of similar companies
str
String containing data source
"""
try:
similar = (
Overview().compare(ticker, compare_list, verbose=0)["Ticker"].to_list()
)
user = "Finviz"
except Exception as e:
logger.exception(str(e))
console.print(e)
similar = [""]
user = "Error"
return similar, user
@log_start_end(log=logger)
def get_comparison_data(data_type: str, similar: List[str]):
"""Screener Overview
Parameters
----------
data_type : str
Data type between: overview, valuation, financial, ownership, performance, technical
    similar : List[str]
        List of similar tickers to compare
    Returns
    -------
pd.DataFrame
Dataframe with overview, valuation, financial, ownership, performance or technical
"""
if data_type == "overview":
screen = overview.Overview()
elif data_type == "valuation":
screen = valuation.Valuation()
elif data_type == "financial":
screen = financial.Financial()
elif data_type == "ownership":
screen = ownership.Ownership()
elif data_type == "performance":
screen = performance.Performance()
elif data_type == "technical":
screen = technical.Technical()
else:
console.print("Invalid selected screener type")
return pd.DataFrame()
screen.set_filter(ticker=",".join(similar))
try:
return screen.screener_view(verbose=0)
except IndexError:
console.print("[red]Invalid data from website[red]")
return pd.DataFrame()
```
#### File: stocks/due_diligence/business_insider_view.py
```python
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
import matplotlib.pyplot as plt
from pandas.core.frame import DataFrame
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.due_diligence import business_insider_model
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def price_target_from_analysts(
ticker: str,
start: str,
interval: str,
stock: DataFrame,
num: int,
raw: bool,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display analysts' price targets for a given stock. [Source: Business Insider]
Parameters
----------
ticker : str
Due diligence ticker symbol
start : str
Start date of the stock data
interval : str
Stock data interval
stock : DataFrame
Due diligence stock dataframe
num : int
Number of latest price targets from analysts to print
raw : bool
Display raw data only
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df_analyst_data = business_insider_model.get_price_target_from_analysts(ticker)
if raw:
df_analyst_data.index = df_analyst_data.index.strftime("%Y-%m-%d")
print_rich_table(
df_analyst_data.sort_index(ascending=False).head(num),
headers=list(df_analyst_data.columns),
show_index=True,
title="Analyst Price Targets",
)
else:
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
if len(external_axes) != 1:
logger.error("Expected list of one axis item.")
console.print("[red]Expected list of one axis item./n[/red]")
return
(ax,) = external_axes
# Slice start of ratings
if start:
df_analyst_data = df_analyst_data[start:] # type: ignore
if interval == "1440min":
ax.plot(stock.index, stock["Adj Close"].values)
legend_price_label = "Adjusted closing price"
# Intraday
else:
ax.plot(stock.index, stock["Close"].values)
legend_price_label = "Closing price"
if start:
ax.plot(df_analyst_data.groupby(by=["Date"]).mean()[start:]) # type: ignore
else:
ax.plot(df_analyst_data.groupby(by=["Date"]).mean())
ax.scatter(
df_analyst_data.index,
df_analyst_data["Price Target"],
color=theme.down_color,
edgecolors=theme.up_color,
zorder=2,
)
ax.legend([legend_price_label, "Average Price Target", "Price Target"])
ax.set_title(f"{ticker} (Time Series) and Price Target")
ax.set_xlim(stock.index[0], stock.index[-1])
ax.set_ylabel("Share Price")
theme.style_primary_axis(ax)
if not external_axes:
theme.visualize_output()
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"pt",
df_analyst_data,
)
@log_start_end(log=logger)
def estimates(ticker: str, export: str):
"""Display analysts' estimates for a given ticker. [Source: Business Insider]
Parameters
----------
ticker : str
Ticker to get analysts' estimates
export : str
Export dataframe data to csv,json,xlsx file
"""
(
df_year_estimates,
df_quarter_earnings,
df_quarter_revenues,
) = business_insider_model.get_estimates(ticker)
print_rich_table(
df_year_estimates,
headers=list(df_year_estimates.columns),
show_index=True,
title="Annual Earnings Estimates",
)
console.print("")
print_rich_table(
df_quarter_earnings,
headers=list(df_quarter_earnings.columns),
show_index=True,
title="Quarterly Earnings Estimates",
)
console.print("")
print_rich_table(
df_quarter_revenues,
headers=list(df_quarter_revenues.columns),
show_index=True,
title="Quarterly Revenue Estimates",
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"pt_year",
df_year_estimates,
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"pt_qtr_earnings",
df_quarter_earnings,
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"pt_qtr_revenues",
df_quarter_revenues,
)
```
#### File: openbb_terminal/economy/test_cnn_view.py
```python
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.economy import cnn_view
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_get_feargreed_report(mocker):
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
# MOCK PLOT_AUTOSCALE
mocker.patch(
target="openbb_terminal.economy.cnn_view.plot_autoscale",
return_value=(8.0, 5.0),
)
# MOCK PLOT_DPI
mocker.patch(
target="openbb_terminal.economy.cnn_view.PLOT_DPI",
new=100,
)
# MOCK EXPORT_DATA
mocker.patch(target="openbb_terminal.economy.cnn_view.export_data")
cnn_view.fear_and_greed_index(indicator="sps", export="")
```
#### File: portfolio/portfolio_optimization/test_po_controller.py
```python
import os
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.portfolio.portfolio_optimization import po_controller
# pylint: disable=E1101
# pylint: disable=W0603
# pylint: disable=E1111
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"queue, expected",
[
(["load", "help"], []),
(["quit", "help"], ["help"]),
],
)
def test_menu_with_queue(expected, mocker, queue):
path_controller = "openbb_terminal.portfolio.portfolio_optimization.po_controller"
# MOCK SWITCH
mocker.patch(
target=f"{path_controller}.PortfolioOptimizationController.switch",
return_value=["quit"],
)
result_menu = po_controller.PortfolioOptimizationController(queue=queue).menu()
assert result_menu == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
path_controller = "openbb_terminal.portfolio.portfolio_optimization.po_controller"
# ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU
mocker.patch(
target="openbb_terminal.feature_flags.USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target="openbb_terminal.parent_classes.session",
)
mocker.patch(
target="openbb_terminal.parent_classes.session.prompt",
return_value="quit",
)
# DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER
mocker.patch.object(
target=po_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target=f"{path_controller}.session",
)
mocker.patch(
target=f"{path_controller}.session.prompt",
return_value="quit",
)
result_menu = po_controller.PortfolioOptimizationController(queue=None).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"mock_input",
["help", "homee help", "home help", "mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
path_controller = "openbb_terminal.portfolio.portfolio_optimization.po_controller"
# DISABLE AUTO-COMPLETION
mocker.patch.object(
target=po_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=False,
)
mocker.patch(
target=f"{path_controller}.session",
return_value=None,
)
# MOCK USER INPUT
mocker.patch("builtins.input", return_value=mock_input)
# MOCK SWITCH
class SystemExitSideEffect:
def __init__(self):
self.first_call = True
def __call__(self, *args, **kwargs):
if self.first_call:
self.first_call = False
raise SystemExit()
return ["quit"]
mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())
mocker.patch(
target=f"{path_controller}.PortfolioOptimizationController.switch",
new=mock_switch,
)
result_menu = po_controller.PortfolioOptimizationController(queue=None).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_print_help():
controller = po_controller.PortfolioOptimizationController(queue=None)
controller.print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"an_input, expected_queue",
[
("", []),
("/help", ["home", "help"]),
("help/help", ["help", "help"]),
("q", ["quit"]),
("h", []),
(
"r",
["quit", "quit", "reset", "portfolio", "po"],
),
],
)
def test_switch(an_input, expected_queue):
controller = po_controller.PortfolioOptimizationController(queue=None)
queue = controller.switch(an_input=an_input)
assert queue == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
mocker.patch("os.system")
controller = po_controller.PortfolioOptimizationController(queue=None)
controller.call_cls([])
assert controller.queue == []
os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, queue, expected_queue",
[
(
"call_exit",
[],
["quit", "quit", "quit"],
),
("call_exit", ["help"], ["quit", "quit", "quit", "help"]),
("call_home", [], ["quit", "quit"]),
("call_help", [], []),
("call_quit", [], ["quit"]),
("call_quit", ["help"], ["quit", "help"]),
(
"call_reset",
[],
["quit", "quit", "reset", "portfolio", "po"],
),
(
"call_reset",
["help"],
["quit", "quit", "reset", "portfolio", "po", "help"],
),
],
)
def test_call_func_expect_queue(expected_queue, func, queue):
controller = po_controller.PortfolioOptimizationController(queue=queue)
result = getattr(controller, func)([])
assert result is None
assert controller.queue == expected_queue
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"tested_func, other_args, mocked_func, called_args, called_kwargs",
[
(
"call_select",
[],
"PortfolioOptimizationController.add_stocks",
[],
dict(),
),
(
"call_add",
[],
"PortfolioOptimizationController.add_stocks",
[],
dict(),
),
(
"call_rmv",
[],
"PortfolioOptimizationController.rmv_stocks",
[],
dict(),
),
(
"call_equal",
[],
"optimizer_view.display_equal_weight",
[],
dict(),
),
(
"call_mktcap",
[],
"optimizer_view.display_property_weighting",
[],
dict(),
),
(
"call_dividend",
[],
"optimizer_view.display_property_weighting",
[],
dict(),
),
(
"call_property",
["--property=open"],
"optimizer_view.display_property_weighting",
[],
dict(),
),
(
"call_maxsharpe",
[],
"optimizer_view.display_max_sharpe",
[],
dict(),
),
(
"call_minvol",
[],
"optimizer_view.display_min_volatility",
[],
dict(),
),
(
"call_maxquadutil",
[],
"optimizer_view.display_max_quadratic_utility",
[],
dict(),
),
(
"call_effrisk",
[],
"optimizer_view.display_efficient_risk",
[],
dict(),
),
(
"call_effret",
[],
"optimizer_view.display_efficient_return",
[],
dict(),
),
(
"call_ef",
[],
"optimizer_view.display_ef",
[],
dict(),
),
(
"call_yolo",
[],
"",
[],
dict(),
),
(
"add_stocks",
[],
"",
[],
dict(),
),
(
"rmv_stocks",
[],
"",
[],
dict(),
),
],
)
def test_call_func(
tested_func, mocked_func, other_args, called_args, called_kwargs, mocker
):
path_controller = "openbb_terminal.portfolio.portfolio_optimization.po_controller"
if mocked_func:
mock = mocker.Mock()
mocker.patch(
target=f"{path_controller}.{mocked_func}",
new=mock,
)
controller = po_controller.PortfolioOptimizationController(queue=None)
controller.tickers = ["AAPL", "MSFT"]
getattr(controller, tested_func)(other_args)
if called_args or called_kwargs:
mock.assert_called_once_with(*called_args, **called_kwargs)
else:
mock.assert_called_once()
else:
controller = po_controller.PortfolioOptimizationController(queue=None)
getattr(controller, tested_func)(other_args)
```
#### File: stocks/technical_analysis/test_ta_controller.py
```python
import os
from datetime import datetime
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.technical_analysis import ta_controller
# pylint: disable=E1101
# pylint: disable=W0603
# pylint: disable=E1111
EMPTY_DF = pd.DataFrame()
MOCK_STOCK_DF = pd.read_csv(
"tests/openbb_terminal/stocks/technical_analysis/csv/test_ta_controller/stock_df.csv",
index_col=0,
)
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("period1", "MOCK_PERIOD_1"),
("period2", "MOCK_PERIOD_2"),
("date", "MOCK_DATE"),
],
}
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"queue, expected",
[
(["load", "help"], []),
(["quit", "help"], ["help"]),
],
)
def test_menu_with_queue(expected, mocker, queue):
path_controller = "openbb_terminal.stocks.technical_analysis.ta_controller"
# MOCK SWITCH
mocker.patch(
target=f"{path_controller}.TechnicalAnalysisController.switch",
return_value=["quit"],
)
result_menu = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="MOCK_INTERVAL",
stock=EMPTY_DF,
queue=queue,
).menu()
assert result_menu == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
path_controller = "openbb_terminal.stocks.technical_analysis.ta_controller"
# ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU
mocker.patch(
target="openbb_terminal.feature_flags.USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target="openbb_terminal.parent_classes.session",
)
mocker.patch(
target="openbb_terminal.parent_classes.session.prompt",
return_value="quit",
)
# DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER
# DISABLE AUTO-COMPLETION
mocker.patch.object(
target=ta_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target=f"{path_controller}.session",
)
mocker.patch(
target=f"{path_controller}.session.prompt",
return_value="quit",
)
result_menu = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="MOCK_INTERVAL",
stock=EMPTY_DF,
queue=None,
).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"mock_input",
["help", "homee help", "home help", "mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
path_controller = "openbb_terminal.stocks.technical_analysis.ta_controller"
# DISABLE AUTO-COMPLETION
mocker.patch.object(
target=ta_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=False,
)
mocker.patch(
target=f"{path_controller}.session",
return_value=None,
)
# MOCK USER INPUT
mocker.patch("builtins.input", return_value=mock_input)
# MOCK SWITCH
class SystemExitSideEffect:
def __init__(self):
self.first_call = True
def __call__(self, *args, **kwargs):
if self.first_call:
self.first_call = False
raise SystemExit()
return ["quit"]
mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())
mocker.patch(
target=f"{path_controller}.TechnicalAnalysisController.switch",
new=mock_switch,
)
result_menu = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="MOCK_INTERVAL",
stock=EMPTY_DF,
queue=None,
).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
@pytest.mark.parametrize(
"start",
[
datetime.strptime("2021-12-01", "%Y-%m-%d"),
None,
],
)
def test_print_help(start):
controller = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=start,
interval="MOCK_INTERVAL",
stock=EMPTY_DF,
queue=None,
)
controller.print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"an_input, expected_queue",
[
("", []),
(
"/help",
[
"home",
"help",
],
),
("help/help", ["help", "help"]),
("q", ["quit"]),
("h", []),
(
"r",
[
"quit",
"quit",
"reset",
"stocks",
"load MOCK_TICKER",
"ta",
],
),
],
)
def test_switch(an_input, expected_queue):
controller = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="MOCK_INTERVAL",
stock=MOCK_STOCK_DF,
queue=None,
)
queue = controller.switch(an_input=an_input)
assert queue == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
mocker.patch("os.system")
controller = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="MOCK_INTERVAL",
stock=EMPTY_DF,
queue=None,
)
controller.call_cls([])
assert controller.queue == []
os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, queue, expected_queue",
[
(
"call_exit",
[],
["quit", "quit", "quit"],
),
("call_exit", ["help"], ["quit", "quit", "quit", "help"]),
("call_home", [], ["quit", "quit"]),
("call_help", [], []),
("call_quit", [], ["quit"]),
("call_quit", ["help"], ["quit", "help"]),
(
"call_reset",
[],
[
"quit",
"quit",
"reset",
"stocks",
"load MOCK_TICKER",
"ta",
],
),
(
"call_reset",
["help"],
[
"quit",
"quit",
"reset",
"stocks",
"load MOCK_TICKER",
"ta",
"help",
],
),
],
)
def test_call_func_expect_queue(expected_queue, func, queue):
controller = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="MOCK_INTERVAL",
stock=EMPTY_DF,
queue=queue,
)
result = getattr(controller, func)([])
assert result is None
assert controller.queue == expected_queue
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"tested_func, other_args, mocked_func, called_args, called_kwargs",
[
(
"call_load",
[
"MOCK_TICKER",
"--start=2021-12-01",
"--end=2021-12-02",
"--interval=1",
"--source=yf",
"--prepost",
],
"stocks_helper.load",
[
"MOCK_TICKER",
datetime.strptime("2021-12-01", "%Y-%m-%d"),
1,
datetime.strptime("2021-12-02", "%Y-%m-%d"),
True,
"yf",
],
{"weekly": False, "monthly": False},
),
(
"call_view",
[],
"finviz_view.view",
[
"MOCK_TICKER",
],
dict(),
),
(
"call_summary",
[],
"finbrain_view.technical_summary_report",
[
"MOCK_TICKER",
],
dict(),
),
(
"call_recom",
[
"--screener=cfd",
"--exchange=MOCK_EXCHANGE",
"--interval=1m",
"--export=csv",
],
"tradingview_view.print_recommendation",
[],
dict(
ticker="MOCK_TICKER",
screener="cfd",
exchange="MOCK_EXCHANGE",
interval="1m",
export="csv",
),
),
(
"call_ema",
[
"1,2",
"--offset=2",
"--export=csv",
],
"overlap_view.view_ma",
[],
dict(
ma_type="EMA",
s_ticker="MOCK_TICKER",
series=MOCK_STOCK_DF["Adj Close"],
length=[1, 2],
offset=2,
export="csv",
),
),
(
"call_sma",
[
"1,2",
"--offset=2",
"--export=csv",
],
"overlap_view.view_ma",
[],
dict(
ma_type="SMA",
s_ticker="MOCK_TICKER",
series=MOCK_STOCK_DF["Adj Close"],
length=[1, 2],
offset=2,
export="csv",
),
),
(
"call_wma",
[
"1,2",
"--offset=2",
"--export=csv",
],
"overlap_view.view_ma",
[],
dict(
ma_type="WMA",
s_ticker="MOCK_TICKER",
series=MOCK_STOCK_DF["Adj Close"],
length=[1, 2],
offset=2,
export="csv",
),
),
(
"call_hma",
[
"1,2",
"--offset=2",
"--export=csv",
],
"overlap_view.view_ma",
[],
dict(
ma_type="HMA",
s_ticker="MOCK_TICKER",
series=MOCK_STOCK_DF["Adj Close"],
length=[1, 2],
offset=2,
export="csv",
),
),
(
"call_vwap",
[
"--offset=2",
"--export=csv",
],
"overlap_view.view_vwap",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
s_interval="MOCK_INTERVAL",
offset=2,
export="csv",
),
),
(
"call_zlma",
[
"1,2",
"--offset=2",
"--export=csv",
],
"overlap_view.view_ma",
[],
dict(
ma_type="ZLMA",
s_ticker="MOCK_TICKER",
series=MOCK_STOCK_DF["Adj Close"],
length=[1, 2],
offset=2,
export="csv",
),
),
(
"call_cci",
[
"--length=1",
"--scalar=2",
"--export=csv",
],
"momentum_view.display_cci",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
length=1,
scalar=2,
export="csv",
),
),
(
"call_macd",
[
"--fast=1",
"--slow=2",
"--signal=3",
"--export=csv",
],
"momentum_view.display_macd",
[],
dict(
s_ticker="MOCK_TICKER",
series=MOCK_STOCK_DF["Adj Close"],
n_fast=1,
n_slow=2,
n_signal=3,
export="csv",
),
),
(
"call_rsi",
[
"1",
"--scalar=2",
"--drift=3",
"--export=csv",
],
"momentum_view.display_rsi",
[],
dict(
s_ticker="MOCK_TICKER",
series=MOCK_STOCK_DF["Adj Close"],
length=1,
scalar=2,
drift=3,
export="csv",
),
),
(
"call_stoch",
[
"--fastkperiod=1",
"--slowdperiod=2",
"--slowkperiod=3",
"--export=csv",
],
"momentum_view.display_stoch",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
fastkperiod=1,
slowdperiod=2,
slowkperiod=3,
export="csv",
),
),
(
"call_fisher",
[
"1",
"--export=csv",
],
"momentum_view.display_fisher",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
length=1,
export="csv",
),
),
(
"call_cg",
[
"1",
"--export=csv",
],
"momentum_view.display_cg",
[],
dict(
s_ticker="MOCK_TICKER",
series=MOCK_STOCK_DF["Adj Close"],
length=1,
export="csv",
),
),
(
"call_adx",
[
"1",
"--scalar=2",
"--drift=3",
"--export=csv",
],
"trend_indicators_view.display_adx",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
length=1,
scalar=2,
drift=3,
export="csv",
),
),
(
"call_aroon",
[
"1",
"--scalar=2",
"--export=csv",
],
"trend_indicators_view.display_aroon",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
length=1,
scalar=2,
export="csv",
),
),
(
"call_bbands",
[
"1",
"--std=2",
"--mamode=MOCK_MAMODE",
"--export=csv",
],
"volatility_view.display_bbands",
[],
dict(
ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
length=1,
n_std=2,
mamode="MOCK_MAMODE",
export="csv",
),
),
(
"call_donchian",
[
"--length_upper=1",
"--length_lower=2",
"--export=csv",
],
"volatility_view.display_donchian",
[],
dict(
ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
upper_length=1,
lower_length=2,
export="csv",
),
),
(
"call_kc",
[
"1",
"--scalar=2",
"--mamode=sma",
"--offset=3",
"--export=csv",
],
"volatility_view.view_kc",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
length=1,
scalar=2,
mamode="sma",
offset=3,
export="csv",
),
),
(
"call_ad",
[
"--open",
"--export=csv",
],
"volume_view.display_ad",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
use_open=True,
export="csv",
),
),
(
"call_adosc",
[
"--open",
"--fast_length=1",
"--slow_length=2",
"--export=csv",
],
"volume_view.display_adosc",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
use_open=True,
fast=1,
slow=2,
export="csv",
),
),
(
"call_obv",
[
"--export=csv",
],
"volume_view.display_obv",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
export="csv",
),
),
(
"call_fib",
[
"1",
"--start=2021-12-01",
"--end=2021-12-02",
"--export=csv",
],
"custom_indicators_view.fibonacci_retracement",
[],
dict(
s_ticker="MOCK_TICKER",
ohlc=MOCK_STOCK_DF,
period=1,
start_date=datetime.strptime("2021-12-01", "%Y-%m-%d"),
end_date=datetime.strptime("2021-12-02", "%Y-%m-%d"),
export="csv",
),
),
],
)
def test_call_func(
tested_func, mocked_func, other_args, called_args, called_kwargs, mocker
):
path_controller = "openbb_terminal.stocks.technical_analysis.ta_controller"
if mocked_func:
mock = mocker.Mock()
mocker.patch(
target=f"{path_controller}.{mocked_func}",
new=mock,
)
controller = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="MOCK_INTERVAL",
stock=MOCK_STOCK_DF,
queue=None,
)
controller.screen_tickers = ["PM"]
getattr(controller, tested_func)(other_args)
if called_args or called_kwargs:
mock.assert_called_once_with(*called_args, **called_kwargs)
else:
mock.assert_called_once()
else:
controller = ta_controller.TechnicalAnalysisController(
ticker="MOCK_TICKER",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="MOCK_INTERVAL",
stock=EMPTY_DF,
queue=None,
)
controller.screen_tickers = ["PM"]
getattr(controller, tested_func)(other_args)
@pytest.mark.vcr
def test_call_load(mocker):
# FORCE SINGLE THREADING
yf_download = ta_controller.stocks_helper.yf.download
def mock_yf_download(*args, **kwargs):
kwargs["threads"] = False
return yf_download(*args, **kwargs)
mocker.patch("yfinance.download", side_effect=mock_yf_download)
controller = ta_controller.TechnicalAnalysisController(
ticker="TSLA",
start=datetime.strptime("2021-12-01", "%Y-%m-%d"),
interval="1440",
stock=pd.DataFrame(),
queue=None,
)
other_args = [
"TSLA",
"--start=2021-12-17",
"--end=2021-12-18",
]
controller.call_load(other_args=other_args)
``` |
{
"source": "23jura23/NeMo",
"score": 2
} |
#### File: tts/torch/helpers.py
```python
import functools
import numpy as np
import torch
from scipy import ndimage
from scipy.stats import betabinom
class BetaBinomialInterpolator:
"""
This module calculates alignment prior matrices (based on beta-binomial distribution) using cached popular sizes and image interpolation.
The implementation is taken from https://github.com/NVIDIA/DeepLearningExamples.
"""
def __init__(self, round_mel_len_to=100, round_text_len_to=20):
self.round_mel_len_to = round_mel_len_to
self.round_text_len_to = round_text_len_to
self.bank = functools.lru_cache(beta_binomial_prior_distribution)
def round(self, val, to):
return max(1, int(np.round((val + 1) / to))) * to
def __call__(self, w, h):
bw = self.round(w, to=self.round_mel_len_to)
bh = self.round(h, to=self.round_text_len_to)
ret = ndimage.zoom(self.bank(bw, bh).T, zoom=(w / bw, h / bh), order=1)
assert ret.shape[0] == w, ret.shape
assert ret.shape[1] == h, ret.shape
return ret
def general_padding(item, item_len, max_len, pad_value=0):
    """Pad the last dimension of item on the right up to max_len with pad_value."""
if item_len < max_len:
item = torch.nn.functional.pad(item, (0, max_len - item_len), value=pad_value)
return item
def beta_binomial_prior_distribution(phoneme_count, mel_count, scaling_factor=1.0):
x = np.arange(0, phoneme_count)
mel_text_probs = []
for i in range(1, mel_count + 1):
a, b = scaling_factor * i, scaling_factor * (mel_count + 1 - i)
        # phoneme_count is a plain int here (BetaBinomialInterpolator passes rounded ints)
        mel_i_prob = betabinom(phoneme_count, a, b).pmf(x)
mel_text_probs.append(mel_i_prob)
return np.array(mel_text_probs)
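# A minimal usage sketch (illustrative only): build an alignment prior for a hypothetical
# utterance with 80 mel frames and 20 text tokens, both directly and through the cached,
# interpolated path, and check the expected (mel, text) shape.
def _example_alignment_prior():
    direct = beta_binomial_prior_distribution(phoneme_count=20, mel_count=80)
    interpolated = BetaBinomialInterpolator()(w=80, h=20)
    assert direct.shape == (80, 20)
    assert interpolated.shape == (80, 20)
    return direct, interpolated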
``` |
{
"source": "23maverick23/oa-data",
"score": 2
} |
#### File: 23maverick23/oa-data/app.py
```python
from flask import (Flask, render_template, request, jsonify)
from flask_restful import (Resource, Api)
from .reports import report87, report88, report91, report92, report93, report94, report95
app = Flask(__name__)
api = Api(app)
class Home(Resource):
def get(self):
return jsonify({"hello": "world"})
class Report87(Resource):
def get(self):
return jsonify(report87.get_data())
class Report88(Resource):
def get(self):
return jsonify(report88.get_data())
class Report91(Resource):
def get(self):
return jsonify(report91.get_data())
class Report92(Resource):
def get(self):
return jsonify(report92.get_data())
class Report93(Resource):
def get(self):
return jsonify(report93.get_data())
class Report94(Resource):
def get(self):
return jsonify(report94.get_data())
class Report95(Resource):
def get(self):
return jsonify(report95.get_data())
api.add_resource(Home, '/')
api.add_resource(Report87, '/api/report87')
api.add_resource(Report88, '/api/report88')
api.add_resource(Report91, '/api/report91')
api.add_resource(Report92, '/api/report92')
api.add_resource(Report93, '/api/report93')
api.add_resource(Report94, '/api/report94')
api.add_resource(Report95, '/api/report95')
if __name__ == '__main__':
app.run(
debug=True,
host='0.0.0.0',
use_reloader=True
)
``` |
{
"source": "23maverick23/project-euler",
"score": 4
} |
#### File: project-euler/problems/problem-001.py
```python
def run():
'''If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23. Find the sum of all
the multiples of 3 or 5 below 1000.
'''
print(sum([num if num % 3 == 0 or num % 5 == 0 else 0 for num in range(1, 1000)]))
if __name__ == "__main__":
run()
``` |
{
"source": "23pointsNorth/chainer",
"score": 3
} |
#### File: examples/mnist/train_mnist_model_parallel.py
```python
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import train_mnist
# Network definition
class ParallelMLP(chainer.Chain):
def __init__(self, n_units, n_out, gpu0, gpu1):
super(ParallelMLP, self).__init__()
self.gpu0 = gpu0
self.gpu1 = gpu1
with self.init_scope():
# the input size, 784, is inferred
self.first0 = train_mnist.MLP(n_units // 2, n_units).to_gpu(gpu0)
self.first1 = train_mnist.MLP(n_units // 2, n_units).to_gpu(gpu1)
# the input size, n_units, is inferred
self.second0 = train_mnist.MLP(n_units // 2, n_out).to_gpu(gpu0)
self.second1 = train_mnist.MLP(n_units // 2, n_out).to_gpu(gpu1)
def forward(self, x):
# assume x is on gpu0
x1 = F.copy(x, self.gpu1)
z0 = self.first0(x)
z1 = self.first1(x1)
# synchronize
h0 = z0 + F.copy(z1, self.gpu0)
h1 = z1 + F.copy(z0, self.gpu1)
y0 = self.second0(F.relu(h0))
y1 = self.second1(F.relu(h1))
# synchronize
y = y0 + F.copy(y1, self.gpu0)
return y # output is on gpu0
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', default=20, type=int,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu0', '-g', default=0, type=int,
help='First GPU ID')
parser.add_argument('--gpu1', '-G', default=1, type=int,
help='Second GPU ID')
parser.add_argument('--out', '-o', default='result_parallel',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', default=1000, type=int,
help='Number of units')
args = parser.parse_args()
print('GPU: {}, {}'.format(args.gpu0, args.gpu1))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# See train_mnist.py for the meaning of these lines
model = L.Classifier(ParallelMLP(args.unit, 10, args.gpu0, args.gpu1))
chainer.backends.cuda.get_device_from_id(args.gpu0).use()
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
train, test = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu0)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu0))
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
``` |
{
"source": "23pointsNorth/gym",
"score": 3
} |
#### File: envs/box2d/bipedal_walker.py
```python
import sys, math
import numpy as np
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
import gym
from gym import spaces
from gym.utils import colorize, seeding
# This is a simple 4-joint walker robot environment.
#
# There are two versions:
#
# - Normal, with slightly uneven terrain.
#
# - Hardcore with ladders, stumps, pitfalls.
#
# Reward is given for moving forward, total 300+ points up to the far end. If the robot falls,
# it gets -100. Applying motor torque costs a small amount of points, so a more optimal
# agent will get a better score.
#
# A heuristic is provided for testing; it's also useful for getting demonstrations to
# learn from. To run the heuristic:
#
# python gym/envs/box2d/bipedal_walker.py
#
# State consists of hull angle speed, angular velocity, horizontal speed, vertical speed,
# position of joints and joints angular speed, legs contact with ground, and 10 lidar
# rangefinder measurements to help deal with the hardcore version. There are no coordinates
# in the state vector. Lidar is less useful in the normal version, but it works.
#
# To solve the game you need to get 300 points in 1600 time steps.
#
# To solve hardcore version you need 300 points in 2000 time steps.
#
# Created by <NAME>. Licensed on the same terms as the rest of OpenAI Gym.
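# A minimal random-rollout sketch (illustrative only, using the obs/reward/done/info
# step API of this file): it shows the reward accumulation described above; a random
# policy will not come close to the 300 points needed to solve the environment.
def _example_random_rollout(max_steps=1600):
    env = BipedalWalker()
    env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        action = env.action_space.sample()
        _, reward, done, _ = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward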
FPS = 50
SCALE = 30.0 # affects how fast-paced the game is; forces should be adjusted as well
MOTORS_TORQUE = 80
SPEED_HIP = 4
SPEED_KNEE = 6
LIDAR_RANGE = 160/SCALE
INITIAL_RANDOM = 5
HULL_POLY =[
(-30,+9), (+6,+9), (+34,+1),
(+34,-8), (-30,-8)
]
LEG_DOWN = -8/SCALE
LEG_W, LEG_H = 8/SCALE, 34/SCALE
VIEWPORT_W = 600
VIEWPORT_H = 400
TERRAIN_STEP = 14/SCALE
TERRAIN_LENGTH = 200 # in steps
TERRAIN_HEIGHT = VIEWPORT_H/SCALE/4
TERRAIN_GRASS = 10 # how long grass spots are, in steps
TERRAIN_STARTPAD = 20 # in steps
FRICTION = 2.5
HULL_FD = fixtureDef(
shape=polygonShape(vertices=[ (x/SCALE,y/SCALE) for x,y in HULL_POLY ]),
density=5.0,
friction=0.1,
categoryBits=0x0020,
maskBits=0x001, # collide only with ground
restitution=0.0) # 0.99 bouncy
LEG_FD = fixtureDef(
shape=polygonShape(box=(LEG_W/2, LEG_H/2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
LOWER_FD = fixtureDef(
shape=polygonShape(box=(0.8*LEG_W/2, LEG_H/2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
class ContactDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
if self.env.hull==contact.fixtureA.body or self.env.hull==contact.fixtureB.body:
self.env.game_over = True
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = True
def EndContact(self, contact):
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = False
class BipedalWalker(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : FPS
}
hardcore = False
def __init__(self):
self.seed()
self.viewer = None
self.world = Box2D.b2World()
self.terrain = None
self.hull = None
self.prev_shaping = None
self.fd_polygon = fixtureDef(
shape = polygonShape(vertices=
[(0, 0),
(1, 0),
(1, -1),
(0, -1)]),
friction = FRICTION)
self.fd_edge = fixtureDef(
shape = edgeShape(vertices=
[(0, 0),
(1, 1)]),
friction = FRICTION,
categoryBits=0x0001,
)
self.reset()
high = np.array([np.inf]*24)
self.action_space = spaces.Box(np.array([-1,-1,-1,-1]), np.array([+1,+1,+1,+1]))
self.observation_space = spaces.Box(-high, high)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.terrain: return
self.world.contactListener = None
for t in self.terrain:
self.world.DestroyBody(t)
self.terrain = []
self.world.DestroyBody(self.hull)
self.hull = None
for leg in self.legs:
self.world.DestroyBody(leg)
self.legs = []
self.joints = []
def _generate_terrain(self, hardcore):
GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5)
state = GRASS
velocity = 0.0
y = TERRAIN_HEIGHT
counter = TERRAIN_STARTPAD
oneshot = False
self.terrain = []
self.terrain_x = []
self.terrain_y = []
for i in range(TERRAIN_LENGTH):
x = i*TERRAIN_STEP
self.terrain_x.append(x)
if state==GRASS and not oneshot:
velocity = 0.8*velocity + 0.01*np.sign(TERRAIN_HEIGHT - y)
if i > TERRAIN_STARTPAD: velocity += self.np_random.uniform(-1, 1)/SCALE #1
y += velocity
elif state==PIT and oneshot:
counter = self.np_random.randint(3, 5)
poly = [
(x, y),
(x+TERRAIN_STEP, y),
(x+TERRAIN_STEP, y-4*TERRAIN_STEP),
(x, y-4*TERRAIN_STEP),
]
self.fd_polygon.shape.vertices=poly
t = self.world.CreateStaticBody(
fixtures = self.fd_polygon)
t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)
self.terrain.append(t)
self.fd_polygon.shape.vertices=[(p[0]+TERRAIN_STEP*counter,p[1]) for p in poly]
t = self.world.CreateStaticBody(
fixtures = self.fd_polygon)
t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)
self.terrain.append(t)
counter += 2
original_y = y
elif state==PIT and not oneshot:
y = original_y
if counter > 1:
y -= 4*TERRAIN_STEP
elif state==STUMP and oneshot:
counter = self.np_random.randint(1, 3)
poly = [
(x, y),
(x+counter*TERRAIN_STEP, y),
(x+counter*TERRAIN_STEP, y+counter*TERRAIN_STEP),
(x, y+counter*TERRAIN_STEP),
]
self.fd_polygon.shape.vertices=poly
t = self.world.CreateStaticBody(
fixtures = self.fd_polygon)
t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)
self.terrain.append(t)
elif state==STAIRS and oneshot:
stair_height = +1 if self.np_random.rand() > 0.5 else -1
stair_width = self.np_random.randint(4, 5)
stair_steps = self.np_random.randint(3, 5)
original_y = y
for s in range(stair_steps):
poly = [
(x+( s*stair_width)*TERRAIN_STEP, y+( s*stair_height)*TERRAIN_STEP),
(x+((1+s)*stair_width)*TERRAIN_STEP, y+( s*stair_height)*TERRAIN_STEP),
(x+((1+s)*stair_width)*TERRAIN_STEP, y+(-1+s*stair_height)*TERRAIN_STEP),
(x+( s*stair_width)*TERRAIN_STEP, y+(-1+s*stair_height)*TERRAIN_STEP),
]
self.fd_polygon.shape.vertices=poly
t = self.world.CreateStaticBody(
fixtures = self.fd_polygon)
t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)
self.terrain.append(t)
counter = stair_steps*stair_width
elif state==STAIRS and not oneshot:
s = stair_steps*stair_width - counter - stair_height
n = s/stair_width
y = original_y + (n*stair_height)*TERRAIN_STEP
oneshot = False
self.terrain_y.append(y)
counter -= 1
if counter==0:
counter = self.np_random.randint(TERRAIN_GRASS/2, TERRAIN_GRASS)
if state==GRASS and hardcore:
state = self.np_random.randint(1, _STATES_)
oneshot = True
else:
state = GRASS
oneshot = True
self.terrain_poly = []
for i in range(TERRAIN_LENGTH-1):
poly = [
(self.terrain_x[i], self.terrain_y[i]),
(self.terrain_x[i+1], self.terrain_y[i+1])
]
self.fd_edge.shape.vertices=poly
t = self.world.CreateStaticBody(
fixtures = self.fd_edge)
color = (0.3, 1.0 if i%2==0 else 0.8, 0.3)
t.color1 = color
t.color2 = color
self.terrain.append(t)
color = (0.4, 0.6, 0.3)
poly += [ (poly[1][0], 0), (poly[0][0], 0) ]
self.terrain_poly.append( (poly, color) )
self.terrain.reverse()
def _generate_clouds(self):
# Sorry for the clouds, couldn't resist
self.cloud_poly = []
for i in range(TERRAIN_LENGTH//20):
x = self.np_random.uniform(0, TERRAIN_LENGTH)*TERRAIN_STEP
y = VIEWPORT_H/SCALE*3/4
poly = [
(x+15*TERRAIN_STEP*math.sin(3.14*2*a/5)+self.np_random.uniform(0,5*TERRAIN_STEP),
y+ 5*TERRAIN_STEP*math.cos(3.14*2*a/5)+self.np_random.uniform(0,5*TERRAIN_STEP) )
for a in range(5) ]
x1 = min( [p[0] for p in poly] )
x2 = max( [p[0] for p in poly] )
self.cloud_poly.append( (poly,x1,x2) )
def reset(self):
self._destroy()
self.world.contactListener_bug_workaround = ContactDetector(self)
self.world.contactListener = self.world.contactListener_bug_workaround
self.game_over = False
self.prev_shaping = None
self.scroll = 0.0
self.lidar_render = 0
W = VIEWPORT_W/SCALE
H = VIEWPORT_H/SCALE
self._generate_terrain(self.hardcore)
self._generate_clouds()
init_x = TERRAIN_STEP*TERRAIN_STARTPAD/2
init_y = TERRAIN_HEIGHT+2*LEG_H
self.hull = self.world.CreateDynamicBody(
position = (init_x, init_y),
fixtures = HULL_FD
)
self.hull.color1 = (0.5,0.4,0.9)
self.hull.color2 = (0.3,0.3,0.5)
self.hull.ApplyForceToCenter((self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True)
self.legs = []
self.joints = []
for i in [-1,+1]:
leg = self.world.CreateDynamicBody(
position = (init_x, init_y - LEG_H/2 - LEG_DOWN),
angle = (i*0.05),
fixtures = LEG_FD
)
leg.color1 = (0.6-i/10., 0.3-i/10., 0.5-i/10.)
leg.color2 = (0.4-i/10., 0.2-i/10., 0.3-i/10.)
rjd = revoluteJointDef(
bodyA=self.hull,
bodyB=leg,
localAnchorA=(0, LEG_DOWN),
localAnchorB=(0, LEG_H/2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed = i,
lowerAngle = -0.8,
upperAngle = 1.1,
)
self.legs.append(leg)
self.joints.append(self.world.CreateJoint(rjd))
lower = self.world.CreateDynamicBody(
position = (init_x, init_y - LEG_H*3/2 - LEG_DOWN),
angle = (i*0.05),
fixtures = LOWER_FD
)
lower.color1 = (0.6-i/10., 0.3-i/10., 0.5-i/10.)
lower.color2 = (0.4-i/10., 0.2-i/10., 0.3-i/10.)
rjd = revoluteJointDef(
bodyA=leg,
bodyB=lower,
localAnchorA=(0, -LEG_H/2),
localAnchorB=(0, LEG_H/2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed = 1,
lowerAngle = -1.6,
upperAngle = -0.1,
)
lower.ground_contact = False
self.legs.append(lower)
self.joints.append(self.world.CreateJoint(rjd))
self.drawlist = self.terrain + self.legs + [self.hull]
class LidarCallback(Box2D.b2.rayCastCallback):
def ReportFixture(self, fixture, point, normal, fraction):
if (fixture.filterData.categoryBits & 1) == 0:
return 1
self.p2 = point
self.fraction = fraction
return 0
self.lidar = [LidarCallback() for _ in range(10)]
return self.step(np.array([0,0,0,0]))[0]
def step(self, action):
#self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help
control_speed = False # Should be easier as well
if control_speed:
self.joints[0].motorSpeed = float(SPEED_HIP * np.clip(action[0], -1, 1))
self.joints[1].motorSpeed = float(SPEED_KNEE * np.clip(action[1], -1, 1))
self.joints[2].motorSpeed = float(SPEED_HIP * np.clip(action[2], -1, 1))
self.joints[3].motorSpeed = float(SPEED_KNEE * np.clip(action[3], -1, 1))
else:
self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))
self.joints[0].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1))
self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1]))
self.joints[1].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1))
self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2]))
self.joints[2].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1))
self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3]))
self.joints[3].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1))
self.world.Step(1.0/FPS, 6*30, 2*30)
pos = self.hull.position
vel = self.hull.linearVelocity
for i in range(10):
self.lidar[i].fraction = 1.0
self.lidar[i].p1 = pos
self.lidar[i].p2 = (
pos[0] + math.sin(1.5*i/10.0)*LIDAR_RANGE,
pos[1] - math.cos(1.5*i/10.0)*LIDAR_RANGE)
self.world.RayCast(self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)
state = [
            self.hull.angle,        # Normal angles up to 0.5 here, but more is possible.
2.0*self.hull.angularVelocity/FPS,
0.3*vel.x*(VIEWPORT_W/SCALE)/FPS, # Normalized to get -1..1 range
0.3*vel.y*(VIEWPORT_H/SCALE)/FPS,
            self.joints[0].angle,   # This will give 1.1 on high up, but it's still OK (and there should be spikes on hitting the ground, that's normal too)
self.joints[0].speed / SPEED_HIP,
self.joints[1].angle + 1.0,
self.joints[1].speed / SPEED_KNEE,
1.0 if self.legs[1].ground_contact else 0.0,
self.joints[2].angle,
self.joints[2].speed / SPEED_HIP,
self.joints[3].angle + 1.0,
self.joints[3].speed / SPEED_KNEE,
1.0 if self.legs[3].ground_contact else 0.0
]
state += [l.fraction for l in self.lidar]
assert len(state)==24
self.scroll = pos.x - VIEWPORT_W/SCALE/5
shaping = 130*pos[0]/SCALE # moving forward is a way to receive reward (normalized to get 300 on completion)
shaping -= 5.0*abs(state[0]) # keep head straight, other than that and falling, any behavior is unpunished
reward = 0
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
for a in action:
reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1)
# normalized to about -50.0 using heuristic, more optimal agent should spend less
done = False
if self.game_over or pos[0] < 0:
reward = -100
done = True
if pos[0] > (TERRAIN_LENGTH-TERRAIN_GRASS)*TERRAIN_STEP:
done = True
return np.array(state), reward, done, {}
def render(self, mode='human'):
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
self.viewer.set_bounds(self.scroll, VIEWPORT_W/SCALE + self.scroll, 0, VIEWPORT_H/SCALE)
self.viewer.draw_polygon( [
(self.scroll, 0),
(self.scroll+VIEWPORT_W/SCALE, 0),
(self.scroll+VIEWPORT_W/SCALE, VIEWPORT_H/SCALE),
(self.scroll, VIEWPORT_H/SCALE),
], color=(0.9, 0.9, 1.0) )
for poly,x1,x2 in self.cloud_poly:
if x2 < self.scroll/2: continue
if x1 > self.scroll/2 + VIEWPORT_W/SCALE: continue
self.viewer.draw_polygon( [(p[0]+self.scroll/2, p[1]) for p in poly], color=(1,1,1))
for poly, color in self.terrain_poly:
if poly[1][0] < self.scroll: continue
if poly[0][0] > self.scroll + VIEWPORT_W/SCALE: continue
self.viewer.draw_polygon(poly, color=color)
self.lidar_render = (self.lidar_render+1) % 100
i = self.lidar_render
if i < 2*len(self.lidar):
l = self.lidar[i] if i < len(self.lidar) else self.lidar[len(self.lidar)-i-1]
self.viewer.draw_polyline( [l.p1, l.p2], color=(1,0,0), linewidth=1 )
for obj in self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
if type(f.shape) is circleShape:
t = rendering.Transform(translation=trans*f.shape.pos)
self.viewer.draw_circle(f.shape.radius, 30, color=obj.color1).add_attr(t)
self.viewer.draw_circle(f.shape.radius, 30, color=obj.color2, filled=False, linewidth=2).add_attr(t)
else:
path = [trans*v for v in f.shape.vertices]
self.viewer.draw_polygon(path, color=obj.color1)
path.append(path[0])
self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)
flagy1 = TERRAIN_HEIGHT
flagy2 = flagy1 + 50/SCALE
x = TERRAIN_STEP*3
self.viewer.draw_polyline( [(x, flagy1), (x, flagy2)], color=(0,0,0), linewidth=2 )
f = [(x, flagy2), (x, flagy2-10/SCALE), (x+25/SCALE, flagy2-5/SCALE)]
self.viewer.draw_polygon(f, color=(0.9,0.2,0) )
self.viewer.draw_polyline(f + [f[0]], color=(0,0,0), linewidth=2 )
return self.viewer.render(return_rgb_array = mode=='rgb_array')
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
class BipedalWalkerHardcore(BipedalWalker):
hardcore = True
if __name__=="__main__":
    # Heuristic: suboptimal, has no notion of balance.
env = BipedalWalker()
env.reset()
steps = 0
total_reward = 0
a = np.array([0.0, 0.0, 0.0, 0.0])
STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1,2,3
SPEED = 0.29 # Will fall forward on higher speed
state = STAY_ON_ONE_LEG
moving_leg = 0
supporting_leg = 1 - moving_leg
SUPPORT_KNEE_ANGLE = +0.1
supporting_knee_angle = SUPPORT_KNEE_ANGLE
while True:
s, r, done, info = env.step(a)
total_reward += r
if steps % 20 == 0 or done:
print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
print("hull " + str(["{:+0.2f}".format(x) for x in s[0:4] ]))
print("leg0 " + str(["{:+0.2f}".format(x) for x in s[4:9] ]))
print("leg1 " + str(["{:+0.2f}".format(x) for x in s[9:14]]))
steps += 1
contact0 = s[8]
contact1 = s[13]
moving_s_base = 4 + 5*moving_leg
supporting_s_base = 4 + 5*supporting_leg
hip_targ = [None,None] # -0.8 .. +1.1
knee_targ = [None,None] # -0.6 .. +0.9
hip_todo = [0.0, 0.0]
knee_todo = [0.0, 0.0]
if state==STAY_ON_ONE_LEG:
hip_targ[moving_leg] = 1.1
knee_targ[moving_leg] = -0.6
supporting_knee_angle += 0.03
if s[2] > SPEED: supporting_knee_angle += 0.03
supporting_knee_angle = min( supporting_knee_angle, SUPPORT_KNEE_ANGLE )
knee_targ[supporting_leg] = supporting_knee_angle
if s[supporting_s_base+0] < 0.10: # supporting leg is behind
state = PUT_OTHER_DOWN
if state==PUT_OTHER_DOWN:
hip_targ[moving_leg] = +0.1
knee_targ[moving_leg] = SUPPORT_KNEE_ANGLE
knee_targ[supporting_leg] = supporting_knee_angle
if s[moving_s_base+4]:
state = PUSH_OFF
supporting_knee_angle = min( s[moving_s_base+2], SUPPORT_KNEE_ANGLE )
if state==PUSH_OFF:
knee_targ[moving_leg] = supporting_knee_angle
knee_targ[supporting_leg] = +1.0
if s[supporting_s_base+2] > 0.88 or s[2] > 1.2*SPEED:
state = STAY_ON_ONE_LEG
moving_leg = 1 - moving_leg
supporting_leg = 1 - moving_leg
if hip_targ[0]: hip_todo[0] = 0.9*(hip_targ[0] - s[4]) - 0.25*s[5]
if hip_targ[1]: hip_todo[1] = 0.9*(hip_targ[1] - s[9]) - 0.25*s[10]
if knee_targ[0]: knee_todo[0] = 4.0*(knee_targ[0] - s[6]) - 0.25*s[7]
if knee_targ[1]: knee_todo[1] = 4.0*(knee_targ[1] - s[11]) - 0.25*s[12]
        hip_todo[0] -= 0.9*(0-s[0]) - 1.5*s[1] # PID to keep head straight
hip_todo[1] -= 0.9*(0-s[0]) - 1.5*s[1]
knee_todo[0] -= 15.0*s[3] # vertical speed, to damp oscillations
knee_todo[1] -= 15.0*s[3]
a[0] = hip_todo[0]
a[1] = knee_todo[0]
a[2] = hip_todo[1]
a[3] = knee_todo[1]
a = np.clip(0.5*a, -1.0, 1.0)
env.render()
if done: break
```
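The environment above can also be exercised without the heuristic controller. The following random-rollout sketch is not part of the original file; it assumes gym and Box2D are installed and that the classes above are importable (the module name `bipedal_walker` is a placeholder).
```python
# Random-rollout sketch (not from the original file); `bipedal_walker` is a placeholder module name.
import numpy as np
from bipedal_walker import BipedalWalker
env = BipedalWalker()
obs = env.reset()                       # 24-dim state: hull, joints, contacts, 10 lidar fractions
for _ in range(200):
    action = env.action_space.sample()  # 4 joint torques in [-1, 1]
    obs, reward, done, info = env.step(np.asarray(action, dtype=np.float32))
    env.render()
    if done:
        obs = env.reset()
env.close()
```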
#### File: envs/mujoco/walker2d.py
```python
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class Walker2dEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, "walker2d.xml", 4)
utils.EzPickle.__init__(self)
def step(self, a):
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = ((posafter - posbefore) / self.dt)
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
done = not (height > 0.8 and height < 2.0 and
ang > -1.0 and ang < 1.0)
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos[1:], np.clip(qvel, -10, 10)]).ravel()
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.lookat[2] += .8
self.viewer.cam.elevation = -20
```
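A brief usage sketch follows, assuming a working MuJoCo and mujoco-py installation and that this file ships inside gym (the import path below reflects that assumption).
```python
# Sketch only: requires MuJoCo + mujoco-py and the walker2d.xml asset bundled with gym.
from gym.envs.mujoco.walker2d import Walker2dEnv
env = Walker2dEnv()
obs = env.reset()
for _ in range(100):
    obs, reward, done, _ = env.step(env.action_space.sample())
    if done:
        obs = env.reset()
env.close()
```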
#### File: gym/spaces/multi_discrete.py
```python
import gym
import numpy as np
class MultiDiscrete(gym.Space):
def __init__(self, nvec):
"""
nvec: vector of counts of each categorical variable
"""
self.nvec = np.asarray(nvec, dtype=np.int32)
assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'
gym.Space.__init__(self, (self.nvec.size,), np.int8)
def sample(self):
return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)
def contains(self, x):
return (x < self.nvec).all() and x.dtype.kind in 'ui'
def to_jsonable(self, sample_n):
return [sample.tolist() for sample in sample_n]
def from_jsonable(self, sample_n):
return np.array(sample_n)
``` |
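A small sketch of the space defined above; it deliberately avoids sample(), which depends on the surrounding gym version exposing gym.spaces.np_random.
```python
# Usage sketch for MultiDiscrete as defined above (sample() needs gym.spaces.np_random,
# which only some gym versions expose, so it is not called here).
import numpy as np
space = MultiDiscrete([3, 5, 2])   # three categorical variables with 3, 5 and 2 values
print(space.nvec, space.shape)     # [3 5 2] (3,)
x = np.array([2, 0, 1], dtype=np.int8)
print(space.contains(x))           # True: each entry below its count, integer dtype
print(space.to_jsonable([x]))      # [[2, 0, 1]]
```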
{
"source": "23rd/ungoogled-chromium",
"score": 2
} |
#### File: ungoogled-chromium/utils/downloads.py
```python
import argparse
import configparser
import enum
import hashlib
import shutil
import subprocess
import sys
import urllib.request
from pathlib import Path
from _common import ENCODING, USE_REGISTRY, ExtractorEnum, get_logger, \
get_chromium_version, add_common_params
from _extraction import extract_tar_file, extract_with_7z, extract_with_winrar
sys.path.insert(0, str(Path(__file__).parent / 'third_party'))
import schema #pylint: disable=wrong-import-position
sys.path.pop(0)
# Constants
class HashesURLEnum(str, enum.Enum):
"""Enum for supported hash URL schemes"""
chromium = 'chromium'
class HashMismatchError(BaseException):
"""Exception for computed hashes not matching expected hashes"""
class DownloadInfo: #pylint: disable=too-few-public-methods
"""Representation of an downloads.ini file for downloading files"""
_hashes = ('md5', 'sha1', 'sha256', 'sha512')
hash_url_delimiter = '|'
_nonempty_keys = ('url', 'download_filename')
_optional_keys = (
'version',
'strip_leading_dirs',
)
_passthrough_properties = (*_nonempty_keys, *_optional_keys, 'extractor', 'output_path')
_ini_vars = {
'_chromium_version': get_chromium_version(),
}
@staticmethod
def _is_hash_url(value):
return value.count(DownloadInfo.hash_url_delimiter) == 2 and value.split(
DownloadInfo.hash_url_delimiter)[0] in iter(HashesURLEnum)
_schema = schema.Schema({
schema.Optional(schema.And(str, len)): {
**{x: schema.And(str, len)
for x in _nonempty_keys},
'output_path': (lambda x: str(Path(x).relative_to(''))),
**{schema.Optional(x): schema.And(str, len)
for x in _optional_keys},
schema.Optional('extractor'): schema.Or(ExtractorEnum.TAR, ExtractorEnum.SEVENZIP,
ExtractorEnum.WINRAR),
schema.Optional(schema.Or(*_hashes)): schema.And(str, len),
schema.Optional('hash_url'): lambda x: DownloadInfo._is_hash_url(x), #pylint: disable=unnecessary-lambda
}
})
class _DownloadsProperties: #pylint: disable=too-few-public-methods
def __init__(self, section_dict, passthrough_properties, hashes):
self._section_dict = section_dict
self._passthrough_properties = passthrough_properties
self._hashes = hashes
def has_hash_url(self):
"""
Returns a boolean indicating whether the current
download has a hash URL"""
return 'hash_url' in self._section_dict
def __getattr__(self, name):
if name in self._passthrough_properties:
return self._section_dict.get(name, fallback=None)
if name == 'hashes':
hashes_dict = dict()
for hash_name in (*self._hashes, 'hash_url'):
value = self._section_dict.get(hash_name, fallback=None)
if value:
if hash_name == 'hash_url':
value = value.split(DownloadInfo.hash_url_delimiter)
hashes_dict[hash_name] = value
return hashes_dict
raise AttributeError('"{}" has no attribute "{}"'.format(type(self).__name__, name))
def _parse_data(self, path):
"""
Parses an INI file located at path
Raises schema.SchemaError if validation fails
"""
def _section_generator(data):
for section in data:
if section == configparser.DEFAULTSECT:
continue
yield section, dict(
filter(lambda x: x[0] not in self._ini_vars, data.items(section)))
new_data = configparser.ConfigParser(defaults=self._ini_vars)
with path.open(encoding=ENCODING) as ini_file:
new_data.read_file(ini_file, source=str(path))
try:
self._schema.validate(dict(_section_generator(new_data)))
except schema.SchemaError as exc:
get_logger().error('downloads.ini failed schema validation (located in %s)', path)
raise exc
return new_data
def __init__(self, ini_paths):
"""Reads an iterable of pathlib.Path to download.ini files"""
self._data = configparser.ConfigParser()
for path in ini_paths:
self._data.read_dict(self._parse_data(path))
def __getitem__(self, section):
"""
Returns an object with keys as attributes and
values already pre-processed strings
"""
return self._DownloadsProperties(self._data[section], self._passthrough_properties,
self._hashes)
def __contains__(self, item):
"""
Returns True if item is a name of a section; False otherwise.
"""
return self._data.has_section(item)
def __iter__(self):
"""Returns an iterator over the section names"""
return iter(self._data.sections())
def properties_iter(self):
"""Iterator for the download properties sorted by output path"""
return sorted(
map(lambda x: (x, self[x]), self), key=(lambda x: str(Path(x[1].output_path))))
class _UrlRetrieveReportHook: #pylint: disable=too-few-public-methods
"""Hook for urllib.request.urlretrieve to log progress information to console"""
def __init__(self):
self._max_len_printed = 0
self._last_percentage = None
def __call__(self, block_count, block_size, total_size):
# Use total_blocks to handle case total_size < block_size
# total_blocks is ceiling of total_size / block_size
# Ceiling division from: https://stackoverflow.com/a/17511341
total_blocks = -(-total_size // block_size)
if total_blocks > 0:
# Do not needlessly update the console. Since the console is
# updated synchronously, we don't want updating the console to
# bottleneck downloading. Thus, only refresh the output when the
# displayed value should change.
percentage = round(block_count / total_blocks, ndigits=3)
if percentage == self._last_percentage:
return
self._last_percentage = percentage
print('\r' + ' ' * self._max_len_printed, end='')
status_line = 'Progress: {:.1%} of {:,d} B'.format(percentage, total_size)
else:
downloaded_estimate = block_count * block_size
status_line = 'Progress: {:,d} B of unknown size'.format(downloaded_estimate)
self._max_len_printed = len(status_line)
print('\r' + status_line, end='')
def _download_via_urllib(url, file_path, show_progress, disable_ssl_verification):
reporthook = None
if show_progress:
reporthook = _UrlRetrieveReportHook()
if disable_ssl_verification:
import ssl
# TODO: Remove this or properly implement disabling SSL certificate verification
orig_https_context = ssl._create_default_https_context #pylint: disable=protected-access
ssl._create_default_https_context = ssl._create_unverified_context #pylint: disable=protected-access
try:
urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook)
finally:
# Try to reduce damage of hack by reverting original HTTPS context ASAP
if disable_ssl_verification:
ssl._create_default_https_context = orig_https_context #pylint: disable=protected-access
if show_progress:
print()
def _download_if_needed(file_path, url, show_progress, disable_ssl_verification):
"""
Downloads a file from url to the specified path file_path if necessary.
If show_progress is True, download progress is printed to the console.
"""
if file_path.exists():
get_logger().info('%s already exists. Skipping download.', file_path)
return
    # File name for the partially downloaded file
tmp_file_path = file_path.with_name(file_path.name + '.partial')
if tmp_file_path.exists():
get_logger().debug('Resuming downloading URL %s ...', url)
else:
get_logger().debug('Downloading URL %s ...', url)
# Perform download
if shutil.which('axel'):
get_logger().debug('Using axel')
try:
subprocess.run(['axel', '-o', str(tmp_file_path), url], check=True)
except subprocess.CalledProcessError as exc:
get_logger().error('axel failed. Re-run the download command to resume downloading.')
raise exc
elif shutil.which('curl'):
get_logger().debug('Using curl')
try:
subprocess.run(['curl', '-L', '-o', str(tmp_file_path), '-C', '-', url], check=True)
except subprocess.CalledProcessError as exc:
get_logger().error('curl failed. Re-run the download command to resume downloading.')
raise exc
else:
get_logger().debug('Using urllib')
_download_via_urllib(url, tmp_file_path, show_progress, disable_ssl_verification)
# Download complete; rename file
tmp_file_path.rename(file_path)
def _chromium_hashes_generator(hashes_path):
with hashes_path.open(encoding=ENCODING) as hashes_file:
hash_lines = hashes_file.read().splitlines()
for hash_name, hash_hex, _ in map(lambda x: x.lower().split(' '), hash_lines):
if hash_name in hashlib.algorithms_available:
yield hash_name, hash_hex
else:
get_logger().warning('Skipping unknown hash algorithm: %s', hash_name)
def _get_hash_pairs(download_properties, cache_dir):
"""Generator of (hash_name, hash_hex) for the given download"""
for entry_type, entry_value in download_properties.hashes.items():
if entry_type == 'hash_url':
hash_processor, hash_filename, _ = entry_value
if hash_processor == 'chromium':
yield from _chromium_hashes_generator(cache_dir / hash_filename)
else:
raise ValueError('Unknown hash_url processor: %s' % hash_processor)
else:
yield entry_type, entry_value
def retrieve_downloads(download_info, cache_dir, show_progress, disable_ssl_verification=False):
"""
Retrieve downloads into the downloads cache.
    download_info is the DownloadInfo of downloads to retrieve.
cache_dir is the pathlib.Path to the downloads cache.
show_progress is a boolean indicating if download progress is printed to the console.
disable_ssl_verification is a boolean indicating if certificate verification
should be disabled for downloads using HTTPS.
Raises FileNotFoundError if the downloads path does not exist.
Raises NotADirectoryError if the downloads path is not a directory.
"""
if not cache_dir.exists():
raise FileNotFoundError(cache_dir)
if not cache_dir.is_dir():
raise NotADirectoryError(cache_dir)
for download_name, download_properties in download_info.properties_iter():
get_logger().info('Downloading "%s" to "%s" ...', download_name,
download_properties.download_filename)
download_path = cache_dir / download_properties.download_filename
_download_if_needed(download_path, download_properties.url, show_progress,
disable_ssl_verification)
if download_properties.has_hash_url():
get_logger().info('Downloading hashes for "%s"', download_name)
_, hash_filename, hash_url = download_properties.hashes['hash_url']
_download_if_needed(cache_dir / hash_filename, hash_url, show_progress,
disable_ssl_verification)
def check_downloads(download_info, cache_dir):
"""
Check integrity of the downloads cache.
download_info is the DownloadInfo of downloads to unpack.
cache_dir is the pathlib.Path to the downloads cache.
Raises source_retrieval.HashMismatchError when the computed and expected hashes do not match.
"""
for download_name, download_properties in download_info.properties_iter():
get_logger().info('Verifying hashes for "%s" ...', download_name)
download_path = cache_dir / download_properties.download_filename
with download_path.open('rb') as file_obj:
archive_data = file_obj.read()
for hash_name, hash_hex in _get_hash_pairs(download_properties, cache_dir):
get_logger().debug('Verifying %s hash...', hash_name)
hasher = hashlib.new(hash_name, data=archive_data)
if not hasher.hexdigest().lower() == hash_hex.lower():
raise HashMismatchError(download_path)
def unpack_downloads(download_info, cache_dir, output_dir, extractors=None):
"""
Unpack downloads in the downloads cache to output_dir. Assumes all downloads are retrieved.
download_info is the DownloadInfo of downloads to unpack.
cache_dir is the pathlib.Path directory containing the download cache
output_dir is the pathlib.Path directory to unpack the downloads to.
extractors is a dictionary of PlatformEnum to a command or path to the
extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip and WinRAR.
May raise undetermined exceptions during archive unpacking.
"""
for download_name, download_properties in download_info.properties_iter():
download_path = cache_dir / download_properties.download_filename
get_logger().info('Unpacking "%s" to %s ...', download_name,
download_properties.output_path)
extractor_name = download_properties.extractor or ExtractorEnum.TAR
if extractor_name == ExtractorEnum.SEVENZIP:
extractor_func = extract_with_7z
elif extractor_name == ExtractorEnum.WINRAR:
extractor_func = extract_with_winrar
elif extractor_name == ExtractorEnum.TAR:
extractor_func = extract_tar_file
else:
raise NotImplementedError(extractor_name)
if download_properties.strip_leading_dirs is None:
strip_leading_dirs_path = None
else:
strip_leading_dirs_path = Path(download_properties.strip_leading_dirs)
extractor_func(
archive_path=download_path,
output_dir=output_dir / Path(download_properties.output_path),
relative_to=strip_leading_dirs_path,
extractors=extractors)
def _add_common_args(parser):
parser.add_argument(
'-i',
'--ini',
type=Path,
nargs='+',
help='The downloads INI to parse for downloads. Can be specified multiple times.')
parser.add_argument(
'-c', '--cache', type=Path, required=True, help='Path to the directory to cache downloads.')
def _retrieve_callback(args):
retrieve_downloads(
DownloadInfo(args.ini), args.cache, args.show_progress, args.disable_ssl_verification)
try:
check_downloads(DownloadInfo(args.ini), args.cache)
except HashMismatchError as exc:
get_logger().error('File checksum does not match: %s', exc)
exit(1)
def _unpack_callback(args):
extractors = {
ExtractorEnum.SEVENZIP: args.sevenz_path,
ExtractorEnum.WINRAR: args.winrar_path,
ExtractorEnum.TAR: args.tar_path,
}
unpack_downloads(DownloadInfo(args.ini), args.cache, args.output, extractors)
def main():
"""CLI Entrypoint"""
parser = argparse.ArgumentParser(description=__doc__)
add_common_params(parser)
subparsers = parser.add_subparsers(title='Download actions', dest='action')
# retrieve
retrieve_parser = subparsers.add_parser(
'retrieve',
help='Retrieve and check download files',
description=('Retrieves and checks downloads without unpacking. '
'The downloader will attempt to use CLI commands axel or curl. '
'If they are not present, Python\'s urllib will be used. However, only '
'the CLI-based downloaders can be resumed if the download is aborted.'))
_add_common_args(retrieve_parser)
retrieve_parser.add_argument(
'--hide-progress-bar',
action='store_false',
dest='show_progress',
help='Hide the download progress.')
retrieve_parser.add_argument(
'--disable-ssl-verification',
action='store_true',
help='Disables certification verification for downloads using HTTPS.')
retrieve_parser.set_defaults(callback=_retrieve_callback)
# unpack
unpack_parser = subparsers.add_parser(
'unpack',
help='Unpack download files',
description='Verifies hashes of and unpacks download files into the specified directory.')
_add_common_args(unpack_parser)
unpack_parser.add_argument(
'--tar-path',
default='tar',
help=('(Linux and macOS only) Command or path to the BSD or GNU tar '
'binary for extraction. Default: %(default)s'))
unpack_parser.add_argument(
'--7z-path',
dest='sevenz_path',
default=USE_REGISTRY,
help=('Command or path to 7-Zip\'s "7z" binary. If "_use_registry" is '
'specified, determine the path from the registry. Default: %(default)s'))
unpack_parser.add_argument(
'--winrar-path',
dest='winrar_path',
default=USE_REGISTRY,
help=('Command or path to WinRAR\'s "winrar" binary. If "_use_registry" is '
'specified, determine the path from the registry. Default: %(default)s'))
unpack_parser.add_argument('output', type=Path, help='The directory to unpack to.')
unpack_parser.set_defaults(callback=_unpack_callback)
args = parser.parse_args()
args.callback(args)
if __name__ == '__main__':
main()
``` |
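Besides the CLI, the module's functions can be driven directly; the sketch below uses placeholder INI, cache and output paths rather than anything from the repository.
```python
# Programmatic sketch of the helpers above; all paths are placeholders.
from pathlib import Path
info = DownloadInfo([Path('downloads.ini')])
cache = Path('build/download_cache')
cache.mkdir(parents=True, exist_ok=True)
retrieve_downloads(info, cache, show_progress=True)
check_downloads(info, cache)                      # raises HashMismatchError on a bad hash
unpack_downloads(info, cache, Path('build/src'))  # extractor defaults to tar per downloads.ini
```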
{
"source": "23rd/youtube-dl",
"score": 2
} |
#### File: youtube_dl/extractor/stitcher.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
str_or_none,
try_get,
url_or_none,
)
class StitcherBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?stitcher\.com/(?:podcast|show)/'
def _call_api(self, path, video_id, query):
resp = self._download_json(
'https://api.prod.stitcher.com/' + path,
video_id, query=query)
        error_message = try_get(resp, lambda x: x['errors'][0]['message'])
        if error_message:
            raise ExtractorError(error_message, expected=True)
return resp['data']
def _extract_description(self, data):
return clean_html(data.get('html_description') or data.get('description'))
def _extract_audio_url(self, episode):
return url_or_none(episode.get('audio_url') or episode.get('guid'))
def _extract_show_info(self, show):
return {
'thumbnail': show.get('image_base_url'),
'series': show.get('title'),
}
def _extract_episode(self, episode, audio_url, show_info):
info = {
'id': compat_str(episode['id']),
'display_id': episode.get('slug'),
'title': episode['title'].strip(),
'description': self._extract_description(episode),
'duration': int_or_none(episode.get('duration')),
'url': audio_url,
'vcodec': 'none',
'timestamp': int_or_none(episode.get('date_published')),
'season_number': int_or_none(episode.get('season')),
'season_id': str_or_none(episode.get('season_id')),
}
info.update(show_info)
return info
class StitcherIE(StitcherBaseIE):
_VALID_URL = StitcherBaseIE._VALID_URL_BASE + r'(?:[^/]+/)+e(?:pisode)?/(?:[^/#?&]+-)?(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true',
'md5': 'e9635098e0da10b21a0e2b85585530f6',
'info_dict': {
'id': '40789481',
'ext': 'mp3',
'title': 'Machine Learning Mastery and Cancer Clusters',
'description': 'md5:547adb4081864be114ae3831b4c2b42f',
'duration': 1604,
'thumbnail': r're:^https?://.*\.jpg',
'upload_date': '20151008',
'timestamp': 1444285800,
'series': 'Talking Machines',
},
}, {
'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true',
'info_dict': {
'id': '40846275',
'display_id': 'the-rare-hourlong-comedy-plus',
'ext': 'mp3',
'title': "The CW's 'Crazy Ex-Girlfriend'",
'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17',
'duration': 2235,
'thumbnail': r're:^https?://.*\.jpg',
},
'params': {
'skip_download': True,
},
'skip': 'Page Not Found',
}, {
# escaped title
'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true',
'only_matching': True,
}, {
'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true',
'only_matching': True,
}, {
'url': 'https://www.stitcher.com/show/threedom/episode/circles-on-a-stick-200212584',
'only_matching': True,
}]
def _real_extract(self, url):
audio_id = self._match_id(url)
data = self._call_api(
'shows/episodes', audio_id, {'episode_ids': audio_id})
episode = data['episodes'][0]
audio_url = self._extract_audio_url(episode)
if not audio_url:
self.raise_login_required()
show = try_get(data, lambda x: x['shows'][0], dict) or {}
return self._extract_episode(
episode, audio_url, self._extract_show_info(show))
class StitcherShowIE(StitcherBaseIE):
_VALID_URL = StitcherBaseIE._VALID_URL_BASE + r'(?P<id>[^/#?&]+)/?(?:[?#&]|$)'
_TESTS = [{
'url': 'http://www.stitcher.com/podcast/the-talking-machines',
'info_dict': {
'id': 'the-talking-machines',
'title': 'Talking Machines',
'description': 'md5:831f0995e40f26c10231af39cf1ebf0b',
},
'playlist_mincount': 106,
}, {
'url': 'https://www.stitcher.com/show/the-talking-machines',
'only_matching': True,
}]
def _real_extract(self, url):
show_slug = self._match_id(url)
data = self._call_api(
'search/show/%s/allEpisodes' % show_slug, show_slug, {'count': 10000})
show = try_get(data, lambda x: x['shows'][0], dict) or {}
show_info = self._extract_show_info(show)
entries = []
for episode in (data.get('episodes') or []):
audio_url = self._extract_audio_url(episode)
if not audio_url:
continue
entries.append(self._extract_episode(episode, audio_url, show_info))
return self.playlist_result(
entries, show_slug, show.get('title'),
self._extract_description(show))
``` |
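In practice the extractor is reached through youtube-dl's public API; a minimal metadata-only sketch, using one of the test URLs above.
```python
# Metadata-only sketch via youtube-dl's public API, using a test URL from above.
import youtube_dl
url = 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481'
with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info(url, download=False)
print(info.get('title'), info.get('duration'))
```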
{
"source": "23subbhashit/Command-Line-Interface",
"score": 3
} |
#### File: Command-Line-Interface/Python/UDP.py
```python
import sys
from struct import *
import socket
def main():
#Creating raw socket to accept data packets from all ports
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
while True:
#Getting raw data and addr
raw_data, addr = s.recvfrom(65535)
#Getting the Ethernet Header
eth = eth_head(raw_data)
print('Frame :-')
print('Destination: {}, Source: {}, Protocol: {}'.format(eth[0], eth[1], eth[2]))
#Checks for IPV4 packets
if eth[2] == 8:
ipv4 = ipv4_header(eth[3])
print('\t - ' + 'IPV4 Packet: ')
print('\t\t - ' + 'Version: {}, Header Length: {}, TTL: {}'.format(ipv4[0], ipv4[1], ipv4[2]))
print('\t\t - ' + 'Protocol: {}, Source: {}, Target: {}'.format(ipv4[3], ipv4[4], ipv4[5]))
#Displaying the UDP data
if ipv4[3] == 17:
udp = udp_head(ipv4[6])
print('\t - ' + 'UDP Segment:')
print('\t\t - ' + 'Source Port: {}, Destination Port: {}, Length: {}'.format(udp[0], udp[1], udp[2]))
#Function for getting ethernet Header by passing the raw data packet
def eth_head(raw_data):
#unpacks ethernet header
dest, src, prototype = unpack('! 6s 6s H', raw_data[:14])
dest_mac = get_mac_addr(dest)
src_mac = get_mac_addr(src)
proto = socket.htons(prototype)
data = raw_data[14:]
return dest_mac, src_mac, proto, data
#Function for unpacking ip header
def ipv4_header(raw_data):
#Unpacking the ipv4 header
version_head_len = raw_data[0]
version = version_head_len >> 4
head_len = (version_head_len & 15) * 4
ttl, proto, src, target = unpack('! 8x B B 2x 4s 4s', raw_data[:20])
data = raw_data[head_len:]
src = get_ip(src)
target = get_ip(target)
return version, head_len, ttl, proto, src, target, data
#Function for unpacking UDP header
def udp_head(raw_data):
src_port, dest_port, size = unpack('! H H 2x H', raw_data[:8])
data = raw_data[8:]
return src_port, dest_port, size, data
#Returns mac address
def get_mac_addr(mac_raw):
byte_str = map('{:02x}'.format, mac_raw)
mac_addr = ':'.join(byte_str).upper()
return mac_addr
#Returns a proper IP address
def get_ip(addr):
return '.'.join(map(str,addr))
#Main function to run program
main()
``` |
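The sniffer itself needs Linux and root privileges for the AF_PACKET raw socket, but the '! 6s 6s H' Ethernet unpack it relies on can be checked offline on a hand-built frame.
```python
# Offline illustration of the '! 6s 6s H' Ethernet-header unpack used above.
from struct import pack, unpack
frame = pack('! 6s 6s H', b'\xaa\xbb\xcc\xdd\xee\xff', b'\x11\x22\x33\x44\x55\x66', 0x0800) + b'payload'
dest, src, proto = unpack('! 6s 6s H', frame[:14])
print(':'.join('{:02x}'.format(b) for b in dest))  # aa:bb:cc:dd:ee:ff
print(hex(proto))                                  # 0x800, i.e. IPv4
```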
{
"source": "23subbhashit/Virtual-pen",
"score": 3
} |
#### File: 23subbhashit/Virtual-pen/draw.py
```python
import cv2
import numpy as np
class drawingCanvas():
def __init__(self):
self.penrange = np.load('penrange.npy')
self.cap = cv2.VideoCapture(0)
self.canvas = None
self.x1,self.y1=0,0
self.val=1
self.draw()
def draw(self):
while True:
_, self.frame = self.cap.read()
self.frame = cv2.flip( self.frame,+1)
if self.canvas is None:
self.canvas = np.zeros_like(self.frame)
mask=self.CreateMask()
contours=self.ContourDetect(mask)
self.drawLine(contours)
self.display()
k = cv2.waitKey(1) & 0xFF
self.takeAction(k)
if k == 27:
break
def CreateMask(self):
hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
lower_range = self.penrange[0]
upper_range = self.penrange[1]
mask = cv2.inRange(hsv, lower_range, upper_range)
return mask
def ContourDetect(self,mask):
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
return contours
def drawLine(self,contours):
if contours and cv2.contourArea(max(contours, key = cv2.contourArea)) > 100:
c = max(contours, key = cv2.contourArea)
x2,y2,w,h = cv2.boundingRect(c)
if self.x1 == 0 and self.y1 == 0:
self.x1,self.y1= x2,y2
else:
self.canvas = cv2.line(self.canvas, (self.x1,self.y1),(x2,y2), [255*self.val,0,0], 10)
self.x1,self.y1= x2,y2
else:
self.x1,self.y1 =0,0
def display(self):
self.frame = cv2.add(self.frame,self.canvas)
cv2.imshow('frame',self.frame)
cv2.imshow('canvas',self.canvas)
def takeAction(self,k):
# When c is pressed clear the entire canvas
if k == ord('c'):
self.canvas = None
#press e to change between eraser mode and writing mode
if k==ord('e'):
self.val= int(not self.val)
if __name__ == '__main__':
drawingCanvas()
cv2.destroyAllWindows()
``` |
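drawingCanvas expects a penrange.npy holding lower/upper HSV bounds for the pen colour. A sketch of producing one is below; the blue-ish values are placeholders, since the repository presumably generates them with a calibration script.
```python
# Sketch for creating the 'penrange.npy' the canvas loads; HSV bounds below are placeholders.
import numpy as np
lower_hsv = np.array([100, 150, 50])    # lower HSV bound (assumed blue-ish pen)
upper_hsv = np.array([140, 255, 255])   # upper HSV bound
np.save('penrange.npy', np.array([lower_hsv, upper_hsv]))
```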
{
"source": "23subbhashit/Voice-assistant",
"score": 3
} |
#### File: 23subbhashit/Voice-assistant/assistant.py
```python
import speech_recognition as sr
import re
import wikipedia
from pyowm import OWM
import webbrowser
import subprocess
import smtplib
import random
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup as soup
import urllib.request
r=sr.Recognizer() #for speech recognition
greet_in= ("hello", "hi", "greetings", "sup", "what's up","hey")
greet_out= ["hi", "hey", "*nods*", "hi there", "hello", "I am glad! You are talking to me"]
def greeting(sentence):
for word in sentence.split():
if word.lower() in greet_in:
return random.choice(greet_out)
with sr.Microphone() as source:
print("say something...")
audio=r.listen(source)
try:
text=r.recognize_google(audio)
print("you said::",text)
except:
print("translation failed:.")
if 'tell me about' in text: #To know about something
exp= re.search('tell me about (.*)',text)
try:
if exp:
topic = exp.group(1)
ny = wikipedia.page(topic)
print(ny.content[:500])
except Exception as e:
pass
elif 'current weather' in text: #To know the current weather
exp = re.search('current weather in (.*)',text)
if exp:
city = exp.group(1)
API_key = '' #API key has to be generated
owm = OWM(API_key)
obs = owm.weather_at_place(city)
w = obs.get_weather()
k = w.get_status()
x = w.get_temperature(unit='celsius')
print("city:",city)
print("Temprature:",k)
print("max:",x["temp_max"])
elif "open" in text: #To open a url
exp=re.search("open (.*)",text)
if exp:
d=exp.group(1)
print(d)
url = 'https://www.' + d+".com"
webbrowser.open(url)
print("website is opened")
else:
pass
elif text.lower() in greet_in: #Greet
a=greeting(text.lower())
print(a)
elif 'launch' in text: #To open an app in your system
exp= re.search('launch (.*)', text)
if exp:
appname = exp.group(1)
appname1 = appname+".exe"
subprocess.Popen(["open", "-n", "/Applications/" + appname1], stdout=subprocess.PIPE)
elif 'time' in text: #Shows the current time
now = datetime.now()
print('Current time is %d hours %d minutes' % (now.hour, now.minute))
elif 'email' in text: #To email someone
sender=input("Please enter your email :")
receiver=input("Please enter receiver's email")
password=input("<PASSWORD>")
print('What should I say to him?')
with sr.Microphone() as source:
print("content")
b=r.listen(source)
try:
        content = r.recognize_google(b)
print("content is :",content)
except:
print("translation failed:.")
mail = smtplib.SMTP('smtp.gmail.com', 587)
mail.ehlo()
mail.starttls()
mail.login(sender,password)
mail.sendmail(sender,receiver,content)
mail.close()
print('Email has been sent successfuly. You can check your inbox.')
elif 'Google' in text: #To search anything in google
exp=re.search('search in Google (.*)',text)
if exp:
query=exp.group(1)
print(query)
browser = webdriver.Chrome()
browser.get('http://www.google.com')
search = browser.find_element_by_name('q')
search.send_keys(query)
search.send_keys(Keys.RETURN)
elif "song" in text:
exp=re.search("play me song (.*)",text) #To play a song in youtube
if exp:
song=exp.group(1)
print(song)
mysong = song
if mysong:
flag = 0
url = "https://www.youtube.com/results?search_query=" + mysong.replace(' ', '+')
response = urllib.request.urlopen(url)
html = response.read()
soup1 = soup(html,"lxml")
url_list = []
for vid in soup1.findAll(attrs={'class':'yt-uix-tile-link'}):
if ('https://www.youtube.com' + vid['href']).startswith("https://www.youtube.com/watch?v="):
flag = 1
final_url = 'https://www.youtube.com' + vid['href']
url_list.append(final_url)
url = url_list[1]
webbrowser.open(url)
if flag == 0:
print('I have not found anything in Youtube ')
``` |
{
"source": "23tareyg/neural-networks-and-deep-learning",
"score": 3
} |
#### File: neural-networks-and-deep-learning/tensorflow/titanic.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow.compat.v2.feature_column as fc
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#Load Data
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv') # training data
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv') # testing data
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
vocabulary = dftrain[feature_name].unique() # gets a list of all unique values from given feature column
feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))
# Input function
def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):
def input_function(): # inner function, this will be returned
ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df)) # create tf.data.Dataset object with data and its label
if shuffle:
ds = ds.shuffle(1000) # randomize order of data
ds = ds.batch(batch_size).repeat(num_epochs) # split dataset into batches of 32 and repeat process for number of epochs
return ds # return a batch of the dataset
return input_function # return a function object for use
train_input_fn = make_input_fn(dftrain, y_train) # here we will call the input_function that was returned to us to get a dataset object we can feed to the model
eval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False)
linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns)
linear_est.train(train_input_fn) # train
result = list(linear_est.predict(eval_input_fn))
print(dfeval.loc[8])
print(y_eval.loc[8])
print(result[8]['probabilities'][1])
``` |
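The script prints a single prediction; a natural follow-up, continuing the variables above, is to ask the estimator for aggregate metrics (a sketch, not part of the original file).
```python
# Follow-up sketch: aggregate metrics over the evaluation set.
metrics = linear_est.evaluate(eval_input_fn)
print('accuracy: {:.3f}'.format(metrics['accuracy']))
probs = [r['probabilities'][1] for r in linear_est.predict(eval_input_fn)]
print('mean predicted survival probability: {:.3f}'.format(sum(probs) / len(probs)))
```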
{
"source": "23tyagit/NeuraHealth",
"score": 2
} |
#### File: app/gui/patient_level_model.py
```python
import numpy as np
import pandas as pd
import regex as re
import itertools
import sklearn.metrics as sk
from functools import reduce
import pickle
def feature_engineering(notes):
data = []
seq_count = len(notes)
if (seq_count <= 11):
seq_count = 0
elif (seq_count <= 23):
seq_count = 1
elif (seq_count <= 45):
seq_count =2
else:
seq_count = 3
p_yes = len(notes[notes["pred"] == 2]) / len(notes)
p_no = len(notes[notes["pred"] == 0]) / len(notes)
p_ntr = len(notes[notes["pred"] == 1]) / len(notes)
curr_dict = {
"percent_yes" : p_yes,
"percent_no" : p_no,
"percent_neither" : p_ntr,
"sequence_count" : seq_count,
}
data.append(curr_dict)
patient_level_features = pd.DataFrame(data)
return patient_level_features
def run_patient_level(notes):
patient_level_features = feature_engineering(notes)
model = pickle.load(open(r"C:\Users\tanis\OneDrive - Phillips Exeter Academy\Data\Programming\APOE-SLAT\Web-Tool\app\gui\static\lr_12_26_patient_level.sav", "rb"))
pred = model.predict(patient_level_features)
proba = model.predict_proba(patient_level_features)
proba = str(proba)
proba = re.sub("\s+", ",", proba.strip())
print("proba:" , proba)
patient_level_features["pred"] = pred
patient_level_features["proba"] = str(proba)
return patient_level_features
``` |
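run_patient_level depends on a hard-coded pickle path, but feature_engineering can be checked on its own with a toy DataFrame; the 'pred' values below are made-up 0/1/2 sequence-level labels.
```python
# Toy check of feature_engineering alone; the 'pred' labels are invented.
import pandas as pd
notes = pd.DataFrame({'pred': [2, 2, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0]})
print(feature_engineering(notes))
# one row: percent_yes / percent_no / percent_neither plus sequence_count bucket 1 (12 <= 23)
```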
{
"source": "23W/Labs",
"score": 3
} |
#### File: SS/L3/lab3_part2.py
```python
import os
import pandas
import matplotlib.pyplot as plt, numpy as np
def wetCounties(years, top_limit):
# read data
df = pandas.read_csv('precipitation.csv')
# rename indices
df = df.set_index(df['Country or Area'])
df.drop(['Country or Area'], axis="columns", inplace=True)
#slice years
df = df[years]
# add SUM column and sort by descending order
df["SUM"] = df.sum(axis=1)
sorted_df = df.sort_values(by="SUM", ascending=False)
# Wettest countries
tops_sum = sorted_df[0:top_limit]
tops = sorted_df[years][0:top_limit]
# Output: Wettest countries
years_title = "[{}..{}]".format(years[0], years[-1]) if len(years)>1 else str(years[0])
title = "Top {} wettest countries in {}".format(top_limit, years_title)
with open("./output/top{}wettest{}_data.txt".format(top_limit, years_title), 'w') as f:
print(title, file=f)
print(tops, file=f)
with open("./output/top{}wettest{}_sum.txt".format(top_limit, years_title), 'w') as f:
print(title, file=f)
print(tops_sum["SUM"], file=f)
# Graph: Top wettest countries
if len(years)>1:
tops_t = tops.transpose()
plt.figure(figsize=(10,8))
plt.title(title)
plt.xlabel("Years")
plt.ylabel("Precipitation (million cubic meters)")
plt.plot(tops_t, marker="o", linestyle="-")
plt.savefig("./output/top{}wettest{}_graph.png".format(top_limit, years_title))
# Chart: Top wettest countries
if top_limit>1:
title = "Comparision chart of top {} wettest countries in {}".format(top_limit, years_title)
tops_sums = tops_sum["SUM"]
plt.figure(figsize=(10,8))
plt.axis("equal")
plt.title(title, y=1.08)
plt.pie(tops_sums, labels=tops_sums.index, autopct="%1.2f%%", radius=1.25)
plt.savefig("./output/top{}wettest{}_comp_chart.png".format(top_limit, years_title))
title = "Full chart of top {} wettest countries in {}".format(top_limit, years_title)
tops_sums["Other"] = sorted_df["SUM"][top_limit:].sum()
plt.figure(figsize=(10,8))
plt.axis("equal")
plt.title(title, y=1.08)
plt.pie(tops_sums, labels=tops_sums.index, autopct="%1.2f%%", radius=1.25)
plt.savefig("./output/top{}wettest{}_other_chart.png".format(top_limit, years_title))
# Main
if not os.path.exists('output'):
os.makedirs('output')
range1 = ["1990"] # range [1990...1990]
range2 = ["{:d}".format(x) for x in range(1995, 2010)] # range [1995...2010)
wetCounties(range1, 5)
wetCounties(range2, 5)
``` |
{
"source": "240325184/KubeOperator",
"score": 2
} |
#### File: api/ansible_api/inventory.py
```python
from .ansible.inventory import BaseInventory
__all__ = [
'AdHocInventory', 'LocalModelInventory'
]
class AdHocInventory(BaseInventory):
"""
JMS Inventory is the manager with jumpserver assets, so you can
write you own manager, construct you inventory_obj
"""
def __init__(self, assets, nodes=None, run_as_admin=False,
run_as=None, become_info=None, vars=None):
"""
:param assets: ["uuid1", ]
        :param run_as_admin: if True, run as each asset's admin user (the admin user may differ per server)
        :param run_as: run uniformly as a specific system user
        :param become_info: optionally become a given user before executing
"""
self.assets = assets or []
self.nodes = nodes or []
self.run_as_admin = run_as_admin
self.run_as = run_as
self.become_info = become_info
self.vars = vars or {}
hosts, groups = self.parse_resource()
super().__init__(host_list=hosts, group_list=groups)
def parse_resource(self):
assets = self.get_all_assets()
hosts = []
nodes = set()
groups = []
for asset in assets:
info = self.convert_to_ansible(asset)
hosts.append(info)
nodes.update(set(asset.nodes.all()))
if self.become_info:
for host in hosts:
host.update(self.become_info)
for node in nodes:
groups.append({
'name': node.value,
'children': [n.value for n in node.get_children()]
})
return hosts, groups
def get_all_assets(self):
assets = set(self.assets)
for node in self.nodes:
_assets = set(node.get_all_assets())
assets.update(_assets)
return assets
def convert_to_ansible(self, asset):
info = {
'id': asset.id,
'hostname': asset.hostname,
'ip': asset.ip,
'port': asset.port,
'vars': dict(),
'groups': [],
}
if asset.domain and asset.domain.has_gateway():
info["vars"].update(self.make_proxy_command(asset))
if self.run_as_admin:
info.update(asset.get_auth_info())
if self.run_as:
info.update(self.get_run_user_info(asset))
for node in asset.nodes.all():
info["groups"].append(node.value)
for label in asset.labels.all():
info["vars"].update({
label.name: label.value
})
info["groups"].append("{}:{}".format(label.name, label.value))
if asset.domain:
info["vars"].update({
"domain": asset.domain.name,
})
info["groups"].append("domain_" + asset.domain.name)
for k, v in self.vars.items():
if not k.startswith('__'):
info['vars'].update({
k: v
})
host_vars = self.vars.get("__{}".format(asset.id), {})
for k, v in host_vars.items():
info['vars'].update({
k: v
})
return info
def get_run_user_info(self, asset):
info = self.run_as.get_auth(asset)._to_secret_json()
return info
@staticmethod
def make_proxy_command(asset):
gateway = asset.domain.random_gateway()
proxy_command_list = [
"ssh", "-p", str(gateway.port),
"{}@{}".format(gateway.username, gateway.ip),
"-W", "%h:%p", "-q",
]
if gateway.password:
proxy_command_list.insert(
0, "sshpass -p {}".format(gateway.password)
)
if gateway.private_key:
proxy_command_list.append("-i {}".format(gateway.private_key_file))
proxy_command = "'-o ProxyCommand={}'".format(
" ".join(proxy_command_list)
)
return {"ansible_ssh_common_args": proxy_command}
class JMSInventory(BaseInventory):
system = object()
def __init__(self, nodes, vars=None):
"""
:param nodes: {"node": {"asset": set(user1, user2)}}
:param vars:
"""
self.nodes = nodes or {}
self.vars = vars or {}
host_list, group_list = self.parse_resource()
super().__init__(host_list, group_list)
@classmethod
def from_json(cls, data):
host_list = data.get('host_list', [])
group_list = data.get('group_list', [])
super().__init__(host_list, group_list)
def parse_all_hosts(self):
host_list = []
for assets in self.nodes.values():
for asset in assets:
_vars = {
'ansible_ssh_host': asset.ip,
'ansible_ssh_port': asset.port,
}
if asset.domain and asset.domain.has_gateway():
_vars.update(self.make_proxy_command(asset))
for k, v in self.vars.items():
if not k.startswith('__'):
_vars.update({
k: v
})
host_vars = self.vars.get("__{}".format(asset.hostname), {})
for k, v in host_vars.items():
_vars.update({
k: v
})
host_list.append({
'hostname': asset.hostname,
'vars': _vars
})
return host_list
def parse_label(self):
pass
def parse_users(self):
pass
def parse_resource(self):
"""
host_list: [{
"hostname": "",
"vars": {},
},
]
group_list: [{
"name": "",
"hosts": ["",],
"children": ["",],
"vars": {}
},]
:return: host_list, group_list
"""
host_list = self.parse_all_hosts()
group_list = []
return host_list, group_list
@staticmethod
def get_run_user_info(user, asset):
info = user.get_auth(asset)._to_secret_json()
return info
@staticmethod
def make_proxy_command(asset):
gateway = asset.domain.random_gateway()
proxy_command_list = [
"ssh", "-p", str(gateway.port),
"{}@{}".format(gateway.username, gateway.ip),
"-W", "%h:%p", "-q",
]
if gateway.password:
proxy_command_list.insert(
0, "sshpass -p {}".format(gateway.password)
)
if gateway.private_key:
proxy_command_list.append("-i {}".format(gateway.private_key_file))
proxy_command = "'-o ProxyCommand={}'".format(
" ".join(proxy_command_list)
)
return {"ansible_ssh_common_args": proxy_command}
class LocalModelInventory(BaseInventory):
def __init__(self, inventory):
"""
:param inventory: .models Inventory Object
"""
self.inventory = inventory
data = self.parse_resource()
super().__init__(data)
def parse_resource(self):
groups = self._parse_groups()
hosts = self._parse_hosts()
return {'hosts': hosts, 'groups': groups}
def _parse_hosts(self):
hosts = []
_hosts = set(self.inventory.hosts)
for group in self.inventory.groups:
_hosts.update(set(group.hosts.all()))
for host in _hosts:
_vars = host.vars or {}
_vars.update({
'ansible_ssh_host': host.ip or host.name,
'ansible_ssh_port': host.port or 22,
'ansible_ssh_user': host.username,
                'ansible_ssh_pass': host.password,
'ansible_ssh_private_key_file': host.private_key_path
})
hosts.append({
'hostname': host.name,
'vars': _vars
})
return hosts
def _parse_groups(self):
groups = []
for group in self.inventory.groups:
groups.append({
'name': group.name,
'vars': group.vars or {},
'hosts': group.hosts.all().values_list('name', flat=True),
'children': group.children.all().values_list('name', flat=True)
})
return groups
class WithHostInfoInventory(BaseInventory):
def __init__(self, inventory_data):
self.inventory_data = inventory_data
data = self.parse_resource()
super().__init__(data)
def parse_resource(self):
groups = self._parse_groups()
hosts = self._parse_hosts()
return {'hosts': hosts, 'groups': groups}
def _parse_hosts(self):
hosts = []
_hosts = self.inventory_data.get('hosts', [])
for host in _hosts:
_vars = host.get('vars', {})
_vars.update({
'ansible_ssh_host': host.get('ip') or host['name'],
'ansible_ssh_port': host.get('port') or 22,
'ansible_ssh_user': host.get('username'),
                'ansible_ssh_pass': host.get('password'),
'ansible_ssh_private_key_file': host.get('private_key_path')
})
hosts.append({
'hostname': host['name'],
'vars': _vars
})
return hosts
def _parse_groups(self):
groups = []
for group in self.inventory_data.get('groups', []):
groups.append({
'name': group.get('name'),
'vars': group.get('vars', {}),
'hosts': group.get('hosts', []),
'children': group.get('children', [])
})
return groups
```
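A sketch of feeding WithHostInfoInventory plain dicts; it assumes the package's ansible BaseInventory (and an Ansible install) is available, and the host credentials are placeholders.
```python
# Sketch: building an inventory from plain dicts with WithHostInfoInventory above.
# Requires the ansible runtime BaseInventory wraps; credentials are placeholders.
data = {
    'hosts': [
        {'name': 'node-1', 'ip': '10.0.0.11', 'port': 22,
         'username': 'root', 'password': 'changeme'},
    ],
    'groups': [
        {'name': 'masters', 'hosts': ['node-1'], 'vars': {'role': 'master'}},
    ],
}
inventory = WithHostInfoInventory(data)
```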
#### File: ansible_api/models/mixins.py
```python
import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
from common import models as common_models
from celery_api.utils import get_celery_task_log_path
from ..ctx import current_project, set_current_project
class ProjectResourceManager(models.Manager):
def get_queryset(self):
queryset = super(ProjectResourceManager, self).get_queryset()
if not current_project:
return queryset
if current_project.is_real():
queryset = queryset.filter(project=current_project.id)
return queryset
def create(self, **kwargs):
if 'project' not in kwargs and current_project.is_real():
kwargs['project'] = current_project._get_current_object()
return super().create(**kwargs)
def all(self):
if current_project:
return super().all()
else:
return self
def set_current_org(self, project):
set_current_project(project)
return self
class AbstractProjectResourceModel(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
project = models.ForeignKey('Project', on_delete=models.CASCADE)
objects = ProjectResourceManager()
name = 'Not-Sure'
class Meta:
abstract = True
def __str__(self):
return '{}: {}'.format(self.project, self.name)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
if not hasattr(self, 'project') and current_project.is_real():
self.project = current_project._get_current_object()
return super().save(force_insert=force_insert, force_update=force_update,
using=using, update_fields=update_fields)
class AbstractExecutionModel(models.Model):
STATE_PENDING = 'PENDING'
STATE_STARTED = 'STARTED'
STATE_FAILURE = 'FAILURE'
STATE_SUCCESS = 'SUCCESS'
STATE_RETRY = 'RETRY'
STATUS_CHOICES = (
(STATE_PENDING, _('Pending')),
(STATE_STARTED, _('Started')),
(STATE_SUCCESS, _('Success')),
(STATE_FAILURE, _('Failure')),
(STATE_RETRY, _('Retry')),
)
timedelta = models.FloatField(default=0.0, verbose_name=_('Time'), null=True)
state = models.CharField(choices=STATUS_CHOICES, default=STATE_PENDING, max_length=16)
num = models.IntegerField(default=1)
result_summary = common_models.JsonDictTextField(blank=True, null=True, default={},
verbose_name=_('Result summary'))
result_raw = common_models.JsonDictTextField(blank=True, null=True, default={}, verbose_name=_('Result raw'))
date_created = models.DateTimeField(auto_now_add=True, null=True, verbose_name=_('Create time'))
date_start = models.DateTimeField(null=True, verbose_name=_('Start time'))
date_end = models.DateTimeField(null=True, verbose_name=_('End time'))
class Meta:
abstract = True
@property
def stdout(self):
with open(self.log_path, 'r') as f:
data = f.read()
return data
@property
def success_hosts(self):
return self.result_summary.get('contacted', []) if self.result_summary else []
@property
def failed_hosts(self):
return self.result_summary.get('dark', {}) if self.result_summary else []
@property
def log_path(self):
return get_celery_task_log_path(self.id)
```
#### File: ansible_api/models/project.py
```python
import os
import uuid
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.conf import settings
from common import models as common_models
from .inventory import Inventory
from ..ansible import BaseInventory
from ..ctx import set_current_project
__all__ = ['Project']
class Project(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.SlugField(max_length=128, allow_unicode=True, unique=True, verbose_name=_('Name'))
# Run full_options, ex: forks,
options = common_models.JsonCharField(max_length=1024, blank=True, null=True, verbose_name=_('Run options'))
comment = models.CharField(max_length=128, blank=True, null=True, verbose_name=_("Comment"))
meta = common_models.JsonDictTextField(blank=True, null=True)
created_by = models.CharField(max_length=128, blank=True, null=True, default='')
date_created = models.DateTimeField(auto_now_add=True)
__root_id = '00000000-0000-0000-0000-000000000000'
__public_id = '00000000-0000-0000-0000-000000000001'
def __str__(self):
return self.name
@property
def inventory(self):
return Inventory(self.host_set.all(), self.group_set.all())
@property
def inventory_file_path(self):
return os.path.join(self.project_dir, 'hosts.yaml')
def refresh_inventory_file(self):
with open(self.inventory_file_path, 'w') as f:
f.write(self.inventory.get_data(fmt='yaml'))
@property
def roles_dir(self):
roles_dir = os.path.join(self.project_dir, 'roles')
if not os.path.isdir(roles_dir):
os.makedirs(roles_dir, exist_ok=True)
return roles_dir
@property
def project_dir(self):
project_dir = os.path.join(settings.ANSIBLE_PROJECTS_DIR, self.name)
if not os.path.isdir(project_dir):
os.makedirs(project_dir, exist_ok=True)
return project_dir
@property
def playbooks_dir(self):
playbooks_dir = os.path.join(self.project_dir, 'playbooks')
if not os.path.isdir(playbooks_dir):
os.makedirs(playbooks_dir, exist_ok=True)
return playbooks_dir
@property
def adhoc_dir(self):
adhoc_dir = os.path.join(self.project_dir, 'adhoc')
if not os.path.isdir(adhoc_dir):
os.makedirs(adhoc_dir, exist_ok=True)
return adhoc_dir
@classmethod
def root_project(cls):
return cls(id=cls.__root_id, name='ROOT')
@classmethod
def public_project(cls):
return cls(id=cls.__public_id, name='Public')
def is_real(self):
return self.id not in [self.__root_id, self.__public_id]
@property
def inventory_obj(self):
return self.inventory.as_object()
def get_inventory_data(self):
return self.inventory.get_data(fmt='py')
def change_to(self):
set_current_project(self)
def clear_inventory(self):
self.group_set.all().delete()
self.host_set.all().delete()
@property
def cleaned_options(self):
options = self.options or {}
options['roles_path'] = [self.roles_dir]
return options
@staticmethod
def test_inventory():
data = {
"hosts": [
{
"hostname": "192.168.244.128",
"vars": {
"ansible_ssh_user": "root",
"ansible_ssh_pass": "<PASSWORD>"
}
},
{
"hostname": "gaga",
"vars": {
"ansible_ssh_host": "192.168.1.1"
}
}
],
"groups": [
{"name": "apache", "hosts": ["gaga"]},
{"name": "web", "hosts": ["192.168.244.128"],
"vars": {"hello": "world"}, "children": ["apache"]},
]
}
return data
@classmethod
def get_test_inventory(cls):
return BaseInventory(cls.test_inventory())
```
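A minimal usage sketch of the sentinel-project pattern above, assuming Django is configured for this app and the module is importable as `ansible_api.models.project` (taken from the file header); the instances are never saved, so no database access is needed:

```python
from ansible_api.models.project import Project

root = Project.root_project()      # sentinel instance with the reserved ROOT id
public = Project.public_project()  # sentinel instance with the reserved Public id
print(root.is_real(), public.is_real())   # False False

demo = Project(name='demo')        # fresh UUID, so it counts as a real project
print(demo.is_real())              # True
demo.change_to()                   # bind it as the current project for resource managers
```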
#### File: ansible_api/models/role.py
```python
from collections import OrderedDict
import os
import git
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from common import models as common_models
from .mixins import AbstractProjectResourceModel
from .utils import name_validator
from ..ansible.galaxy import MyGalaxyRole, MyGalaxyAPI
__all__ = ['Role']
class Role(AbstractProjectResourceModel):
STATE_NOT_INSTALL = 'uninstalled'
STATE_INSTALLED = 'installed'
STATE_INSTALLING = 'installing'
STATE_FAILED = 'failed'
STATE_CHOICES = (
(STATE_NOT_INSTALL, 'UnInstalled'),
(STATE_INSTALLED, 'Installed'),
(STATE_INSTALLING, 'Installing'),
(STATE_FAILED, 'Failed')
)
TYPE_GIT = 'git'
TYPE_HTTP = 'http'
TYPE_GALAXY = 'galaxy'
TYPE_FILE = 'file'
TYPE_CHOICES = (
(TYPE_GALAXY, 'galaxy'),
(TYPE_GIT, 'git'),
(TYPE_HTTP, 'http'),
(TYPE_FILE, 'file'),
)
name = models.CharField(max_length=128, validators=[name_validator])
type = models.CharField(max_length=16, choices=TYPE_CHOICES, default=TYPE_GALAXY)
comment = models.CharField(max_length=1024, blank=True, verbose_name=_("Comment"))
galaxy_name = models.CharField(max_length=128, blank=True, null=True)
git = common_models.JsonDictCharField(max_length=4096, default={'repo': '', 'branch': 'master'})
url = models.CharField(max_length=1024, verbose_name=_("Url"), blank=True)
logo = models.ImageField(verbose_name='Logo', upload_to="logo", null=True)
categories = models.CharField(max_length=256, verbose_name=_("Tags"), blank=True)
version = models.CharField(max_length=1024, blank=True, default='master')
state = models.CharField(default=STATE_NOT_INSTALL, choices=STATE_CHOICES, max_length=16)
meta = common_models.JsonDictTextField(verbose_name=_("Meta"), blank=True)
meta_ext = common_models.JsonDictTextField(verbose_name=_("Meta Ext"), blank=True)
created_by = models.CharField(max_length=128, blank=True, null=True, default='')
date_created = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('name', 'project')
def __str__(self):
return self.name
def delete(self, using=None, keep_parents=False):
role = MyGalaxyRole(self.name, path=self.project.roles_dir)
role.remove()
return super().delete(using=using, keep_parents=keep_parents)
@property
def _role(self):
role = MyGalaxyRole(self.name, path=self.project.roles_dir)
return role
@property
def variables(self):
return self._role.default_variables
@property
def role_dir(self):
return os.path.join(self.project.roles_dir, self.name)
@property
def meta_all(self):
meta = OrderedDict([
('name', self.name),
('version', self.version),
('comment', self.comment),
('state', self.get_state_display()),
('url', self.url),
('type', self.type),
('categories', self.categories),
])
if isinstance(self.meta, dict):
meta.update(self.meta)
if isinstance(self.meta_ext, dict):
meta.update(self.meta_ext)
meta.pop('readme', None)
meta.pop('readme_html', None)
galaxy_info = meta.pop('galaxy_info', {})
for k, v in galaxy_info.items():
if k == 'platforms':
v = ' '.join([i['name']+str(i['versions']) for i in v])
meta[k] = v
return meta
def install_from_galaxy(self):
api = MyGalaxyAPI()
role = MyGalaxyRole(self.galaxy_name, path=self.project.roles_dir)
success, error = role.install()
if success:
self.comment = api.lookup_role_by_name(self.galaxy_name)['description']
self.url = api.role_git_url(self.galaxy_name)
self.version = role.version
self.meta = role.metadata
categories = ''
if self.meta and self.meta['galaxy_info'].get('categories'):
categories = self.meta['galaxy_info']['categories']
elif self.meta and self.meta['galaxy_info'].get('galaxy_tags'):
categories = self.meta['galaxy_info']['galaxy_tags']
self.categories = ','.join(categories) if isinstance(categories, list) else str(categories)
os.rename(os.path.join(self.project.roles_dir, self.galaxy_name), self.role_dir)
return success, error
def install_from_git(self):
success, error = True, None
if not self.git.get('repo'):
success = False
            error = 'No git repo specified'
return success, error
print("Install playbook from: {}".format(self.git.get('repo')))
try:
if os.path.isdir(os.path.join(self.role_dir, '.git')):
repo = git.Repo(self.role_dir)
remote = repo.remote()
remote.pull()
else:
git.Repo.clone_from(
self.git['repo'], self.role_dir,
branch=self.git.get('branch'), depth=1,
)
except Exception as e:
success = False
error = e
return success, error
def install(self):
self.state = self.STATE_INSTALLING
if self.type == self.TYPE_GALAXY:
success, err = self.install_from_galaxy()
elif self.type == self.TYPE_GIT:
success, err = self.install_from_git()
else:
success = False
err = Exception("From {}, using other function".format(self.type))
if success:
self.state = self.STATE_INSTALLED
else:
self.state = self.STATE_FAILED
self.save()
return success, err
def uninstall(self):
role = MyGalaxyRole(self.name, path=self.project.roles_dir)
role.remove()
@property
def logo_url(self):
default = settings.STATIC_URL + "ansible/img/role_logo_default.png"
if self.logo:
return self.logo.url
return default
```
#### File: ansible_api/serializers/adhoc.py
```python
from rest_framework import serializers
from django.shortcuts import reverse
from ..models import AdHoc, AdHocExecution
from .mixins import ProjectSerializerMixin
__all__ = [
'AdHocReadSerializer', 'AdHocSerializer', 'AdHocExecutionSerializer',
]
class AdHocReadSerializer(serializers.ModelSerializer):
args = serializers.JSONField(required=False, allow_null=True)
result = serializers.JSONField(read_only=True)
summary = serializers.JSONField(read_only=True)
class Meta:
model = AdHoc
fields = [
'id', 'pattern', 'module', 'args', 'project', 'summary', 'result'
]
read_only_fields = (
'id', 'created_by',
)
class AdHocSerializer(AdHocReadSerializer, ProjectSerializerMixin):
pass
class AdHocExecutionSerializer(serializers.ModelSerializer, ProjectSerializerMixin):
result_summary = serializers.JSONField(read_only=True)
log_url = serializers.SerializerMethodField()
log_ws_url = serializers.SerializerMethodField()
class Meta:
model = AdHocExecution
fields = [
'id', 'num', 'state', 'timedelta', 'log_url', 'log_ws_url',
'result_summary', 'date_created', 'date_start', 'date_end',
'adhoc', 'project'
]
read_only_fields = [
'id', 'num', 'state', 'timedelta', 'log_url', 'log_ws_url',
'result_summary', 'date_created', 'date_start', 'date_end',
'project'
]
@staticmethod
def get_log_url(obj):
return reverse('celery-api:task-log-api', kwargs={'pk': obj.id})
@staticmethod
def get_log_ws_url(obj):
return '/ws/tasks/{}/log/'.format(obj.id)
```
#### File: api/ansible_api/signal_handlers.py
```python
import logging
import uuid
from django.dispatch import receiver
from django.db.models.signals import pre_delete, post_save, post_delete
from django.utils import timezone
from celery import current_task
from .models import Playbook, Role, PlaybookExecution
from .signals import pre_execution_start, post_execution_start
logger = logging.getLogger(__file__)
@receiver(post_save, sender=Playbook)
def on_playbook_create_or_update(sender, instance, created, **kwargs):
if instance.is_periodic and instance.is_active:
if created:
instance.create_period_task()
else:
instance.disable_period_task()
@receiver(pre_delete, sender=Playbook)
def on_playbook_delete(sender, instance, **kwargs):
instance.cleanup()
@receiver(post_save, sender=Role)
def on_role_create_or_update(sender, instance, created, **kwargs):
from .tasks import install_role
if created:
install_role.delay(instance.id)
@receiver(pre_execution_start)
def on_execution_start(sender, execution, **kwargs):
execution.date_start = timezone.now()
execution.state = execution.STATE_STARTED
execution.save()
@receiver(post_execution_start)
def on_execution_end(sender, execution, result, **kwargs):
date_finished = timezone.now()
timedelta = (timezone.now() - execution.date_start).seconds
state = execution.STATE_FAILURE
if result.get('summary', {}).get("success", False):
state = execution.STATE_SUCCESS
execution.result_summary = result.get('summary', {})
execution.result_raw = result.get('raw', {})
execution.state = state
    execution.date_end = date_finished
execution.timedelta = timedelta
execution.save()
```
#### File: api/celery_api/logger.py
```python
from logging import StreamHandler
from django.conf import settings
from celery import current_task
from celery.signals import task_prerun, task_postrun
from kombu import Connection, Exchange, Queue, Producer
from kombu.mixins import ConsumerMixin
from .utils import get_celery_task_log_path
routing_key = 'celery_log'
celery_log_exchange = Exchange('celery_log_exchange', type='direct')
celery_log_queue = [Queue('celery_log', celery_log_exchange, routing_key=routing_key)]
class CeleryLoggerConsumer(ConsumerMixin):
def __init__(self):
self.connection = Connection(settings.CELERY_LOG_BROKER_URL)
def get_consumers(self, Consumer, channel):
return [Consumer(queues=celery_log_queue,
accept=['pickle', 'json'],
callbacks=[self.process_task])
]
def handle_task_start(self, task_id, message):
pass
def handle_task_end(self, task_id, message):
pass
def handle_task_log(self, task_id, msg, message):
pass
def process_task(self, body, message):
action = body.get('action')
task_id = body.get('task_id')
msg = body.get('msg')
if action == CeleryLoggerProducer.ACTION_TASK_LOG:
self.handle_task_log(task_id, msg, message)
elif action == CeleryLoggerProducer.ACTION_TASK_START:
self.handle_task_start(task_id, message)
elif action == CeleryLoggerProducer.ACTION_TASK_END:
self.handle_task_end(task_id, message)
class CeleryLoggerProducer:
ACTION_TASK_START, ACTION_TASK_LOG, ACTION_TASK_END = range(3)
def __init__(self):
self.connection = Connection(settings.CELERY_LOG_BROKER_URL)
@property
def producer(self):
return Producer(self.connection)
def publish(self, payload):
self.producer.publish(
payload, serializer='json', exchange=celery_log_exchange,
declare=[celery_log_exchange], routing_key=routing_key
)
def log(self, task_id, msg):
payload = {'task_id': task_id, 'msg': msg, 'action': self.ACTION_TASK_LOG}
return self.publish(payload)
def read(self):
pass
def flush(self):
pass
def task_end(self, task_id):
payload = {'task_id': task_id, 'action': self.ACTION_TASK_END}
return self.publish(payload)
def task_start(self, task_id):
payload = {'task_id': task_id, 'action': self.ACTION_TASK_START}
return self.publish(payload)
class CeleryTaskLoggerHandler(StreamHandler):
terminator = '\r\n'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
task_prerun.connect(self.on_task_start)
task_postrun.connect(self.on_start_end)
@staticmethod
def get_current_task_id():
if not current_task:
return
task_id = current_task.request.root_id
return task_id
def on_task_start(self, sender, task_id, **kwargs):
return self.handle_task_start(task_id)
def on_start_end(self, sender, task_id, **kwargs):
return self.handle_task_end(task_id)
def after_task_publish(self, sender, body, **kwargs):
pass
def emit(self, record):
task_id = self.get_current_task_id()
if not task_id:
return
try:
self.write_task_log(task_id, record)
self.flush()
except Exception:
self.handleError(record)
def write_task_log(self, task_id, msg):
pass
def handle_task_start(self, task_id):
pass
def handle_task_end(self, task_id):
pass
class CeleryTaskMQLoggerHandler(CeleryTaskLoggerHandler):
def __init__(self):
self.producer = CeleryLoggerProducer()
super().__init__(stream=None)
def write_task_log(self, task_id, record):
msg = self.format(record)
self.producer.log(task_id, msg)
def flush(self):
self.producer.flush()
class CeleryTaskFileHandler(CeleryTaskLoggerHandler):
def __init__(self):
self.f = None
super().__init__(stream=None)
def emit(self, record):
msg = self.format(record)
if not self.f:
return
self.f.write(msg)
self.f.write(self.terminator)
self.flush()
def flush(self):
self.f and self.f.flush()
def handle_task_start(self, task_id):
log_path = get_celery_task_log_path(task_id)
self.f = open(log_path, 'a')
def handle_task_end(self, task_id):
self.f and self.f.close()
```
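A hedged sketch of wiring `CeleryTaskFileHandler` into a logger inside a Celery worker, so records emitted during a task land in the per-task file opened by `handle_task_start()`; it assumes `get_celery_task_log_path()` resolves to a writable directory and that the module is importable as `celery_api.logger`:

```python
import logging

from celery_api.logger import CeleryTaskFileHandler

task_logger = logging.getLogger('celery_task')
handler = CeleryTaskFileHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
task_logger.addHandler(handler)
task_logger.setLevel(logging.INFO)

# Inside a task body, this record is appended to get_celery_task_log_path(task_id):
# task_logger.info("starting playbook run ...")
```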
#### File: api/cloud_provider/signal_handlers.py
```python
from django.db.models.signals import m2m_changed, post_save, pre_save
from django.dispatch import receiver
from cloud_provider.models import Region, Zone
@receiver(post_save, sender=Region)
def on_region_save(sender, instance=None, created=True, **kwargs):
if created:
instance.on_region_create()
@receiver(post_save, sender=Zone)
def on_zone_save(sender, instance=None, created=True, **kwargs):
if created:
instance.on_zone_create()
```
#### File: api/common/utils.py
```python
import os
import copy
import tarfile
import zipfile
import gzip
import paramiko
from io import StringIO
from itsdangerous import TimedJSONWebSignatureSerializer, \
JSONWebSignatureSerializer, BadSignature, SignatureExpired
from django.conf import settings
def get_object_or_none(model, **kwargs):
try:
obj = model.objects.get(**kwargs)
except model.DoesNotExist:
return None
return obj
def shadow_key(iterable, key=None, remove=False):
data_dump = copy.deepcopy(iterable)
if isinstance(iterable, dict):
for k, v in data_dump.items():
            if (isinstance(key, (str, tuple)) and k == key) or (callable(key) and key(k)):
if remove:
del iterable[k]
else:
iterable[k] = "******"
if isinstance(v, (list, tuple, dict)):
iterable[k] = shadow_key(v, key=key)
elif isinstance(iterable, (list, tuple)):
for item in data_dump:
iterable.remove(item)
iterable.append(shadow_key(item, key=key))
return iterable
class Singleton(type):
def __init__(cls, *args, **kwargs):
cls.__instance = None
super().__init__(*args, **kwargs)
def __call__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = super().__call__(*args, **kwargs)
return cls.__instance
else:
return cls.__instance
class Signer(metaclass=Singleton):
"""用来加密,解密,和基于时间戳的方式验证token"""
def __init__(self, secret_key=None):
self.secret_key = secret_key
def sign(self, value):
s = JSONWebSignatureSerializer(self.secret_key)
return s.dumps(value).decode()
def unsign(self, value):
if value is None:
return value
s = JSONWebSignatureSerializer(self.secret_key)
try:
return s.loads(value)
except BadSignature:
return {}
def sign_t(self, value, expires_in=3600):
s = TimedJSONWebSignatureSerializer(self.secret_key, expires_in=expires_in)
return str(s.dumps(value), encoding="utf8")
def unsign_t(self, value):
s = TimedJSONWebSignatureSerializer(self.secret_key)
try:
return s.loads(value)
except (BadSignature, SignatureExpired):
return {}
def get_signer():
signer = Signer(settings.SECRET_KEY)
return signer
def uncompress_tar(src_file, dest_dir):
try:
tar = tarfile.open(src_file)
names = tar.getnames()
for name in names:
tar.extract(name, dest_dir)
tar.close()
    except Exception as e:
        return False, e
    return True, None
def uncompress_zip(src_file, dest_dir):
try:
zip_file = zipfile.ZipFile(src_file)
for names in zip_file.namelist():
zip_file.extract(names, dest_dir)
zip_file.close()
    except Exception as e:
        return False, e
    return True, None
def uncompress_gz(src_file, dest_dir):
    try:
        # build the output file name from the source file name
        f_name = dest_dir + '/' + os.path.basename(src_file)
        # create the gzip object and write the decompressed bytes out in binary mode
        g_file = gzip.GzipFile(src_file)
        with open(f_name, "wb") as out:
            out.write(g_file.read())
        g_file.close()
    except Exception as e:
        return False, e
    return True, None
def ssh_key_string_to_obj(text, password=None):
key = None
try:
key = paramiko.RSAKey.from_private_key(StringIO(text), password=password)
except paramiko.SSHException:
pass
try:
key = paramiko.DSSKey.from_private_key(StringIO(text), password=password)
except paramiko.SSHException:
pass
return key
```
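A minimal round trip through `Signer`, assuming Django settings provide a `SECRET_KEY` and that an itsdangerous release older than 2.0 is installed (the `TimedJSONWebSignatureSerializer` used above was removed in 2.x); the import path follows the file header:

```python
from common.utils import get_signer

signer = get_signer()

token = signer.sign({'user': 'admin'})
print(signer.unsign(token))            # {'user': 'admin'}

expiring = signer.sign_t({'user': 'admin'}, expires_in=60)
print(signer.unsign_t(expiring))       # {'user': 'admin'} until it expires, then {}
```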
#### File: api/kubeops_api/components.py
```python
from kubeops_api.models.setting import Setting
http_prefix = 'http://'
https_prefix = 'https://'
def get_component_urls(cluster):
urls = {}
app_url = cluster.get_config("APP_DOMAIN").get('value')
if app_url:
urls = {
"grafana": http_prefix + "grafana." + app_url,
"prometheus": http_prefix + "prometheus." + app_url,
"registry-ui": http_prefix + "registry-ui." + app_url,
"dashboard": https_prefix + "dashboard." + app_url,
"traefik": http_prefix + "traefik." + app_url,
"scope": http_prefix + "scope.weave." + app_url,
"ceph": http_prefix + "ceph." + app_url
}
return urls
```
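A quick check of the URL construction with a stand-in cluster object; `get_config` is assumed to return a dict with a `'value'` key, as the function above expects, and the app domain here is made up:

```python
class FakeCluster:
    def get_config(self, key):
        return {'value': 'apps.example.com'}   # made-up app domain

urls = get_component_urls(FakeCluster())
print(urls['grafana'])    # http://grafana.apps.example.com
print(urls['dashboard'])  # https://dashboard.apps.example.com
```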
#### File: kubeops_api/migrations/0040_auto_20191104_1009.py
```python
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kubeops_api', '0039_dns'),
]
def forwards_func(apps, schema_editor):
DNS = apps.get_model("kubeops_api", "DNS")
db_alias = schema_editor.connection.alias
DNS.objects.using(db_alias).create(dns1="172.16.58.3", dns2="8.8.8.8")
def reverse_func(apps, schema_editor):
DNS = apps.get_model("kubeops_api", "DNS")
db_alias = schema_editor.connection.alias
for dns in DNS.objects.using(db_alias).all():
dns.delete()
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
```
#### File: kubeops_api/models/package.py
```python
import logging
import os
import threading
import uuid
import yaml
from django.db import models
from common.models import JsonTextField
from django.utils.translation import ugettext_lazy as _
from fit2ansible.settings import PACKAGE_DIR
from kubeops_api.package_manage import *
logger = logging.getLogger('kubeops')
__all__ = ['Package']
class Package(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
meta = JsonTextField(blank=True, null=True, verbose_name=_('Meta'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
packages_dir = PACKAGE_DIR
def __str__(self):
return self.name
class Meta:
verbose_name = _('Package')
@property
def path(self):
return os.path.join(self.packages_dir, self.name)
@property
def repo_port(self):
return self.meta['vars']['repo_port']
@property
def registry_port(self):
return self.meta['vars']['registry_port']
@classmethod
def lookup(cls):
logger.info('lookup package...')
for d in os.listdir(cls.packages_dir):
full_path = os.path.join(cls.packages_dir, d)
meta_path = os.path.join(full_path, 'meta.yml')
if not os.path.isdir(full_path) or not os.path.isfile(meta_path):
continue
with open(meta_path) as f:
                metadata = yaml.safe_load(f)
defaults = {'name': d, 'meta': metadata}
logger.info('save package {}...'.format(d))
instance = cls.objects.update_or_create(defaults=defaults, name=d)[0]
            thread = threading.Thread(target=cls.start_container, args=(instance,))
thread.start()
@classmethod
def start_container(cls, package):
if not is_package_container_exists(package.name):
create_package_container(package)
return
if not is_package_container_start(package.name):
start_package_container(package)
```
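A sketch of the per-package `meta.yml` layout that `lookup()` reads; the exact keys beyond `vars.repo_port` and `vars.registry_port` are assumptions based on the properties above:

```python
import yaml

meta = yaml.safe_load("""
vars:
  repo_port: 8081
  registry_port: 8082
""")
print(meta['vars']['repo_port'])      # 8081
print(meta['vars']['registry_port'])  # 8082
```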
#### File: users/migrations/0001_create_user.py
```python
from django.contrib.auth.hashers import make_password
from django.db import migrations, models
def add_default_admin(apps, schema_editor):
user_model = apps.get_model("auth", "User")
db_alias = schema_editor.connection.alias
user_model.objects.using(db_alias).create(
username="admin",
email="<EMAIL>",
password=make_password("<PASSWORD>"),
is_superuser=True,
is_staff=True
)
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(add_default_admin),
]
``` |
{
"source": "2429581027/local-geometric-guided",
"score": 2
} |
#### File: local-geometric-guided/models/DeepFit_pcp.py
```python
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import numpy as np
import torch.nn.functional as F
import normal_estimation_utils
import ThreeDmFVNet
def fit_Wjet(points, weights, order=2, compute_neighbor_normals=False):
"""
Fit a "n-jet" (n-order truncated Taylor expansion) to a point clouds with weighted points.
We assume that PCA was performed on the points beforehand.
To do a classic jet fit input weights as a one vector.
:param points: xyz points coordinates
:param weights: weight vector (weight per point)
:param order: n-order of the jet
:param compute_neighbor_normals: bool flag to compute neighboring point normal vector
:return: beta: polynomial coefficients
:return: n_est: normal estimation
:return: neighbor_normals: analytically computed normals of neighboring points
"""
neighbor_normals = None
batch_size, D, n_points = points.shape
# compute the vandermonde matrix
x = points[:, 0, :].unsqueeze(-1)
y = points[:, 1, :].unsqueeze(-1)
z = points[:, 2, :].unsqueeze(-1)
weights = weights.unsqueeze(-1)
# handle zero weights - if all weights are zero set them to 1
valid_count = torch.sum(weights > 1e-3, dim=1)
w_vector = torch.where(valid_count > 18, weights.view(batch_size, -1),
torch.ones_like(weights, requires_grad=True).view(batch_size, -1)).unsqueeze(-1)
if order > 1:
#pre conditioning
h = (torch.mean(torch.abs(x), 1) + torch.mean(torch.abs(y), 1)) / 2 # absolute value added from https://github.com/CGAL/cgal/blob/b9e320659e41c255d82642d03739150779f19575/Jet_fitting_3/include/CGAL/Monge_via_jet_fitting.h
# h = torch.mean(torch.sqrt(x*x + y*y), dim=2)
idx = torch.abs(h) < 0.0001
h[idx] = 0.1
# h = 0.1 * torch.ones(batch_size, 1, device=points.device)
x = x / h.unsqueeze(-1).repeat(1, n_points, 1)
y = y / h.unsqueeze(-1).repeat(1, n_points, 1)
if order == 1:
A = torch.cat([x, y, torch.ones_like(x)], dim=2)
elif order == 2:
A = torch.cat([x, y, x * x, y * y, x * y, torch.ones_like(x)], dim=2)
h_2 = h * h
D_inv = torch.diag_embed(1/torch.cat([h, h, h_2, h_2, h_2, torch.ones_like(h)], dim=1))
elif order == 3:
y_2 = y * y
x_2 = x * x
xy = x * y
A = torch.cat([x, y, x_2, y_2, xy, x_2 * x, y_2 * y, x_2 * y, y_2 * x, torch.ones_like(x)], dim=2)
h_2 = h * h
h_3 = h_2 * h
D_inv = torch.diag_embed(1/torch.cat([h, h, h_2, h_2, h_2, h_3, h_3, h_3, h_3, torch.ones_like(h)], dim=1))
elif order == 4:
y_2 = y * y
x_2 = x * x
x_3 = x_2 * x
y_3 = y_2 * y
xy = x * y
A = torch.cat([x, y, x_2, y_2, xy, x_3, y_3, x_2 * y, y_2 * x, x_3 * x, y_3 * y, x_3 * y, y_3 * x, y_2 * x_2,
torch.ones_like(x)], dim=2)
h_2 = h * h
h_3 = h_2 * h
h_4 = h_3 * h
D_inv = torch.diag_embed(1/torch.cat([h, h, h_2, h_2, h_2, h_3, h_3, h_3, h_3, h_4, h_4, h_4, h_4, h_4, torch.ones_like(h)], dim=1))
else:
raise ValueError("Polynomial order unsupported, please use 1 or 2 ")
XtX = torch.matmul(A.permute(0, 2, 1), w_vector * A)
XtY = torch.matmul(A.permute(0, 2, 1), w_vector * z)
beta = solve_linear_system(XtX, XtY, sub_batch_size=16)
if order > 1: #remove preconditioning
beta = torch.matmul(D_inv, beta)
n_est = torch.nn.functional.normalize(torch.cat([-beta[:, 0:2].squeeze(-1), torch.ones(batch_size, 1, device=x.device, dtype=beta.dtype)], dim=1), p=2, dim=1)
if compute_neighbor_normals:
beta_ = beta.squeeze().unsqueeze(1).repeat(1, n_points, 1).unsqueeze(-1)
if order == 1:
neighbor_normals = n_est.unsqueeze(1).repeat(1, n_points, 1)
elif order == 2:
neighbor_normals = torch.nn.functional.normalize(
torch.cat([-(beta_[:, :, 0] + 2 * beta_[:, :, 2] * x + beta_[:, :, 4] * y),
-(beta_[:, :, 1] + 2 * beta_[:, :, 3] * y + beta_[:, :, 4] * x),
torch.ones(batch_size, n_points, 1, device=x.device)], dim=2), p=2, dim=2)
elif order == 3:
neighbor_normals = torch.nn.functional.normalize(
torch.cat([-(beta_[:, :, 0] + 2 * beta_[:, :, 2] * x + beta_[:, :, 4] * y + 3 * beta_[:, :, 5] * x_2 +
2 *beta_[:, :, 7] * xy + beta_[:, :, 8] * y_2),
-(beta_[:, :, 1] + 2 * beta_[:, :, 3] * y + beta_[:, :, 4] * x + 3 * beta_[:, :, 6] * y_2 +
beta_[:, :, 7] * x_2 + 2 * beta_[:, :, 8] * xy),
torch.ones(batch_size, n_points, 1, device=x.device)], dim=2), p=2, dim=2)
elif order == 4:
# [x, y, x_2, y_2, xy, x_3, y_3, x_2 * y, y_2 * x, x_3 * x, y_3 * y, x_3 * y, y_3 * x, y_2 * x_2
neighbor_normals = torch.nn.functional.normalize(
torch.cat([-(beta_[:, :, 0] + 2 * beta_[:, :, 2] * x + beta_[:, :, 4] * y + 3 * beta_[:, :, 5] * x_2 +
2 * beta_[:, :, 7] * xy + beta_[:, :, 8] * y_2 + 4 * beta_[:, :, 9] * x_3 + 3 * beta_[:, :, 11] * x_2 * y
+ beta_[:, :, 12] * y_3 + 2 * beta_[:, :, 13] * y_2 * x),
-(beta_[:, :, 1] + 2 * beta_[:, :, 3] * y + beta_[:, :, 4] * x + 3 * beta_[:, :, 6] * y_2 +
beta_[:, :, 7] * x_2 + 2 * beta_[:, :, 8] * xy + 4 * beta_[:, :, 10] * y_3 + beta_[:, :, 11] * x_3 +
3 * beta_[:, :, 12] * x * y_2 + 2 * beta_[:, :, 13] * y * x_2),
torch.ones(batch_size, n_points, 1, device=x.device)], dim=2), p=2, dim=2)
return beta.squeeze(), n_est, neighbor_normals
def solve_linear_system(XtX, XtY, sub_batch_size=None):
"""
Solve linear system of equations. use sub batches to avoid MAGMA bug
:param XtX: matrix of the coefficients
    :param XtY: right-hand-side vector of the linear system
:param sub_batch_size: size of mini mini batch to avoid MAGMA error, if None - uses the entire batch
:return:
"""
if sub_batch_size is None:
sub_batch_size = XtX.size(0)
n_iterations = int(XtX.size(0) / sub_batch_size)
    assert XtX.size(0) % sub_batch_size == 0, "sub_batch_size should be a factor of the batch size {}".format(XtX.size(0))
beta = torch.zeros_like(XtY)
n_elements = XtX.shape[2]
for i in range(n_iterations):
try:
L = torch.cholesky(XtX[sub_batch_size * i:sub_batch_size * (i + 1), ...], upper=False)
beta[sub_batch_size * i:sub_batch_size * (i + 1), ...] = \
torch.cholesky_solve(XtY[sub_batch_size * i:sub_batch_size * (i + 1), ...], L, upper=False)
except:
# # add noise to diagonal for cases where XtX is low rank
eps = torch.normal(torch.zeros(sub_batch_size, n_elements, device=XtX.device),
0.01 * torch.ones(sub_batch_size, n_elements, device=XtX.device))
eps = torch.diag_embed(torch.abs(eps))
XtX[sub_batch_size * i:sub_batch_size * (i + 1), ...] = \
XtX[sub_batch_size * i:sub_batch_size * (i + 1), ...] + \
eps * XtX[sub_batch_size * i:sub_batch_size * (i + 1), ...]
try:
L = torch.cholesky(XtX[sub_batch_size * i:sub_batch_size * (i + 1), ...], upper=False)
beta[sub_batch_size * i:sub_batch_size * (i + 1), ...] = \
torch.cholesky_solve(XtY[sub_batch_size * i:sub_batch_size * (i + 1), ...], L, upper=False)
except:
beta[sub_batch_size * i:sub_batch_size * (i + 1), ...], _ =\
torch.solve(XtY[sub_batch_size * i:sub_batch_size * (i + 1), ...], XtX[sub_batch_size * i:sub_batch_size * (i + 1), ...])
return beta
class PointNetFeatures(nn.Module):
def __init__(self, num_points=500, num_scales=1, use_point_stn=False, use_feat_stn=False, point_tuple=1, sym_op='max'):
super(PointNetFeatures, self).__init__()
self.num_points=num_points
self.point_tuple=point_tuple
self.sym_op = sym_op
self.use_point_stn = use_point_stn
self.use_feat_stn = use_feat_stn
self.num_scales=num_scales
self.conv1 = torch.nn.Conv1d(21, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 64, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
if self.use_point_stn:
# self.stn1 = STN(num_scales=self.num_scales, num_points=num_points, dim=3, sym_op=self.sym_op)
self.stn1 = QSTN(num_scales=self.num_scales, num_points=num_points*self.point_tuple, dim=3, sym_op=self.sym_op)
if self.use_feat_stn:
self.stn2 = STN(num_scales=self.num_scales, num_points=num_points, dim=64, sym_op=self.sym_op)
def forward(self, x):
n_pts = x.size()[2]
points = x[: , 0:3, :]
features = x[: , 3: , :]
# input transform
if self.use_point_stn:
# from tuples to list of single points
points = points.view(points.size(0), 3, -1)
trans = self.stn1(points)
points = points.transpose(2, 1)
points = torch.bmm(points, trans)
points = points.transpose(2, 1)
points = points.contiguous().view(points.size(0), 3 * self.point_tuple, -1)
else:
trans = None
x = torch.cat((points, features) , 1)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
# feature transform
if self.use_feat_stn:
trans2 = self.stn2(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans2)
x = x.transpose(2, 1)
else:
trans2 = None
return x, trans, trans2, points
class PointNetEncoder(nn.Module):
def __init__(self, num_points=500, num_scales=1, use_point_stn=False, use_feat_stn=False, point_tuple=1, sym_op='max'):
super(PointNetEncoder, self).__init__()
self.pointfeat = PointNetFeatures(num_points=num_points, num_scales=num_scales, use_point_stn=use_point_stn,
use_feat_stn=use_feat_stn, point_tuple=point_tuple, sym_op=sym_op)
self.num_points=num_points
self.point_tuple=point_tuple
self.sym_op = sym_op
self.use_point_stn = use_point_stn
self.use_feat_stn = use_feat_stn
self.num_scales=num_scales
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
def forward(self, pts_feas):
n_pts = pts_feas.size()[2]
pointfeat, trans, trans2, points = self.pointfeat(pts_feas)
x = F.relu(self.bn2(self.conv2(pointfeat)))
x = self.bn3(self.conv3(x))
global_feature = torch.max(x, 2, keepdim=True)[0]
x = global_feature.view(-1, 1024, 1).repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1), global_feature.squeeze(), trans, trans2, points
class PointNet3DmFVEncoder(nn.Module):
def __init__(self, num_points=500, num_scales=1, use_point_stn=False, use_feat_stn=False, point_tuple=1, sym_op='max', n_gaussians=5):
super(PointNet3DmFVEncoder, self).__init__()
self.num_points = num_points
self.point_tuple = point_tuple
self.sym_op = sym_op
self.use_point_stn = use_point_stn
self.use_feat_stn = use_feat_stn
self.num_scales = num_scales
self.pointfeat = PointNetFeatures(num_points=num_points, num_scales=num_scales, use_point_stn=use_point_stn,
use_feat_stn=use_feat_stn, point_tuple=point_tuple, sym_op=sym_op)
self.n_gaussians = n_gaussians
self.gmm = ThreeDmFVNet.get_3d_grid_gmm(subdivisions=[self.n_gaussians, self.n_gaussians, self.n_gaussians],
variance=np.sqrt(1.0 / self.n_gaussians))
def forward(self, x):
points = x
n_pts = x.size()[2]
pointfeat, trans, trans2, points = self.pointfeat(points)
global_feature = ThreeDmFVNet.get_3DmFV_pytorch(points.permute([0, 2, 1]), self.gmm.weights_, self.gmm.means_,
np.sqrt(self.gmm.covariances_), normalize=True)
global_feature = torch.flatten(global_feature, start_dim=1)
x = global_feature.unsqueeze(-1).repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1), global_feature.squeeze(), trans, trans2, points
class DeepFit(nn.Module):
def __init__(self, k=1, num_points=500, use_point_stn=False, use_feat_stn=False, point_tuple=1,
sym_op='max', arch=None, n_gaussians=5, jet_order=2, weight_mode="tanh",
use_consistency=False):
super(DeepFit, self).__init__()
self.k = k # k is the number of weights per point e.g. 1
self.num_points=num_points
self.point_tuple = point_tuple
if arch == '3dmfv':
self.n_gaussians = n_gaussians # change later to get this as input
self.feat = PointNet3DmFVEncoder(num_points=num_points, use_point_stn=use_point_stn, use_feat_stn=use_feat_stn,
point_tuple=point_tuple, sym_op=sym_op, n_gaussians= self.n_gaussians )
feature_dim = self.n_gaussians * self.n_gaussians * self.n_gaussians * 20 + 64
else:
self.feat = PointNetEncoder(num_points=num_points, use_point_stn=use_point_stn, use_feat_stn=use_feat_stn,
point_tuple=point_tuple, sym_op=sym_op)
feature_dim = 1024 + 64
self.conv1 = nn.Conv1d(feature_dim, 512, 1)
self.conv2 = nn.Conv1d(512, 256, 1)
self.conv3 = nn.Conv1d(256, 128, 1)
self.conv4 = nn.Conv1d(128, self.k, 1)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(128)
self.conv5 = nn.Conv1d(3, 64, 1)
self.conv6 = nn.Conv1d(64, 128, 1)
self.fc1 = nn.Linear(128, 64, 1)
self.fc2 = nn.Linear(64, 3, 1)
self.bn5 = nn.BatchNorm1d(64)
self.bn6 = nn.BatchNorm1d(128)
self.bnfc1 = nn.BatchNorm1d(64)
self.jet_order = jet_order
self.weight_mode = weight_mode
self.compute_neighbor_normals = use_consistency
self.do = torch.nn.Dropout(0.25)
def forward(self, pts_feas):
x, _, trans, trans2, points = self.feat(pts_feas)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
# point weight estimation.
if self.weight_mode == "softmax":
x = F.softmax(self.conv4(x))
weights = 0.01 + x # add epsilon for numerical robustness
elif self.weight_mode =="tanh":
x = torch.tanh(self.conv4(x))
weights = (0.01 + torch.ones_like(x) + x) / 2.0 # learn the residual->weights start at 1
elif self.weight_mode =="sigmoid":
weights = 0.01 + torch.sigmoid(self.conv4(x))
beta, normal, neighbor_normals = fit_Wjet(points, weights.squeeze(), order=self.jet_order,
compute_neighbor_normals=self.compute_neighbor_normals)
points = torch.mul(points, weights)
x = F.relu(self.bn5(self.conv5(points)))
x = F.relu(self.bn6(self.conv6(x)))
x = torch.max(x, 2, keepdim=True)[0]
#n_pts = x.size()[2]
#x_g = global_feature.view(-1, 128, 1).repeat(1, 1, n_pts)
#x = torch.cat([x_g, x], 1)
x = F.relu(self.bnfc1(self.fc1(x.squeeze(-1))))
normal_final = self.fc2(x)
#normal_final_len = torch.max(normal_final.new_tensor([sys.float_info.epsilon*100]), normal_final.norm(p=2, dim=1, keepdim=True))
#normal_final = normal_final / normal_final_len
return normal_final, normal, beta.squeeze(), weights.squeeze(), trans, trans2, neighbor_normals
class STN(nn.Module):
def __init__(self, num_scales=1, num_points=500, dim=3, sym_op='max'):
super(STN, self).__init__()
self.dim = dim
self.sym_op = sym_op
self.num_scales = num_scales
self.num_points = num_points
self.conv1 = torch.nn.Conv1d(self.dim, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, self.dim*self.dim)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
if self.num_scales > 1:
self.fc0 = nn.Linear(1024*self.num_scales, 1024)
self.bn0 = nn.BatchNorm1d(1024)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
# symmetric operation over all points
if self.num_scales == 1:
x = self.mp1(x)
else:
x_scales = x.new_empty(x.size(0), 1024*self.num_scales, 1)
for s in range(self.num_scales):
x_scales[:, s*1024:(s+1)*1024, :] = self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
x = x_scales
x = x.view(-1, 1024*self.num_scales)
if self.num_scales > 1:
x = F.relu(self.bn0(self.fc0(x)))
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = torch.eye(self.dim, dtype=x.dtype, device=x.device).view(1, self.dim*self.dim).repeat(batchsize, 1)
x = x + iden
x = x.view(-1, self.dim, self.dim)
return x
class QSTN(nn.Module):
def __init__(self, num_scales=1, num_points=500, dim=3, sym_op='max'):
super(QSTN, self).__init__()
self.dim = dim
self.sym_op = sym_op
self.num_scales = num_scales
self.num_points = num_points
self.conv1 = torch.nn.Conv1d(self.dim, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 4)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
if self.num_scales > 1:
self.fc0 = nn.Linear(1024*self.num_scales, 1024)
self.bn0 = nn.BatchNorm1d(1024)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
# symmetric operation over all points
if self.num_scales == 1:
x = self.mp1(x)
else:
x_scales = x.new_empty(x.size(0), 1024*self.num_scales, 1)
for s in range(self.num_scales):
x_scales[:, s*1024:(s+1)*1024, :] = self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
x = x_scales
x = x.view(-1, 1024*self.num_scales)
if self.num_scales > 1:
x = F.relu(self.bn0(self.fc0(x)))
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
# add identity quaternion (so the network can output 0 to leave the point cloud identical)
iden = x.new_tensor([1, 0, 0, 0])
x = x + iden
# convert quaternion to rotation matrix
x = normal_estimation_utils.batch_quat_to_rotmat(x)
return x
def compute_principal_curvatures(beta):
"""
given the jet coefficients, compute the principal curvatures and principal directions:
the eigenvalues and eigenvectors of the weingarten matrix
:param beta: batch of Jet coefficients vector
:return: k1, k2, dir1, dir2: batch of principal curvatures and principal directions
"""
with torch.no_grad():
if beta.shape[1] < 5:
raise ValueError("Can't compute curvatures for jet with order less than 2")
else:
b1_2 = torch.pow(beta[:, 0], 2)
b2_2 = torch.pow(beta[:, 1], 2)
            # first fundamental form
E = (1 + b1_2).view(-1, 1, 1)
G = (1 + b2_2).view(-1, 1, 1)
F = (beta[:, 1] * beta[:, 0]).view(-1, 1, 1)
I = torch.cat([torch.cat([E, F], dim=2), torch.cat([F, G], dim=2)], dim=1)
            # second fundamental form
norm_N0 = torch.sqrt(b1_2 + b2_2 + 1)
e = (2*beta[:, 2] / norm_N0).view(-1, 1, 1)
f = (beta[:, 4] / norm_N0).view(-1, 1, 1)
g = (2*beta[:, 3] / norm_N0).view(-1, 1, 1)
II = torch.cat([torch.cat([e, f], dim=2), torch.cat([f, g], dim=2)], dim=1)
M_weingarten = -torch.bmm(torch.inverse(I), II)
curvatures, dirs = torch.symeig(M_weingarten, eigenvectors=True) #faster
dirs = torch.cat([dirs, torch.zeros(dirs.shape[0], 2, 1, device=dirs.device)], dim=2) # pad zero in the normal direction
return curvatures, dirs
``` |
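A hedged usage sketch of the weighted jet fit on synthetic data, following the shapes documented above (points are `(batch, 3, n_points)`); it assumes an older PyTorch release that still provides `torch.cholesky`, `torch.solve`, and `torch.symeig`, which the module relies on:

```python
import torch

batch, n_points = 4, 128
points = torch.randn(batch, 3, n_points)
weights = torch.ones(batch, n_points)   # all-ones weights -> classic (unweighted) jet fit

beta, n_est, neighbor_normals = fit_Wjet(points, weights, order=2,
                                         compute_neighbor_normals=True)
print(beta.shape)              # torch.Size([4, 6])     order-2 jet coefficients
print(n_est.shape)             # torch.Size([4, 3])     one unit normal per neighborhood
print(neighbor_normals.shape)  # torch.Size([4, 128, 3])

curvatures, dirs = compute_principal_curvatures(beta)
print(curvatures.shape)        # torch.Size([4, 2])     principal curvatures k1, k2
```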
{
"source": "243286065/mini_chromium",
"score": 3
} |
#### File: toolchain/win/manifest_wrapper.py
```python
import subprocess
import sys
def GetEnv(arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
def main(arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print(line)
return popen.returncode
if __name__ == '__main__':
sys.exit(main(*sys.argv[1:]))
``` |
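For reference, a small sketch of the environment-block format that `GetEnv()` parses: NUL-separated `KEY=VALUE` pairs with two trailing NULs (the paths here are made up):

```python
block = "PATH=C:\\tools\0INCLUDE=C:\\sdk\\include\0\0"
pairs = block[:-2].split('\0')
print(dict(item.split('=', 1) for item in pairs))
# {'PATH': 'C:\\tools', 'INCLUDE': 'C:\\sdk\\include'}
```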
{
"source": "2435191/Ludwig-Ahgren-Subathon-OCR",
"score": 2
} |
#### File: Ludwig-Ahgren-Subathon-OCR/src/downloader.py
```python
import concurrent.futures as cf
import os
import sys
import time
from math import ceil
from queue import Queue
from threading import Lock, Thread
import cv2
import pafy
import pandas
from src.image_processing import LudFrame, TimeStringParsingError, CroppedOcrError
from src.utils import seconds_to_string
class LudVideo():
def __init__(self, url, dest, min_res=360, crop_left=0, crop_right=1, record_ims=False):
assert crop_left < crop_right
self.crop_left = crop_left
self.crop_right = crop_right
self.record_ims = record_ims
self.dest = dest
self.url = url
self._q = Queue(1000)
self.get_frames_done = False
self.video = pafy.new(self.url)
self.length = self.video.length * (crop_right - crop_left)
for v in self.video.videostreams:
# must be mp4: https://github.com/opencv/opencv/issues/19924
if v.dimensions[1] == min_res and v.extension == 'mp4':
self.stream = v
break
self.sample_capture = cv2.VideoCapture(self.stream.url)
self.total_frames = int(self.sample_capture.get(cv2.CAP_PROP_FRAME_COUNT))
self.frames = int(self.total_frames * (crop_right - crop_left))
self.frames_offset = self.total_frames * crop_left
self.fps = self.sample_capture.get(cv2.CAP_PROP_FPS)
self.ms_per_frame = 1000 / self.fps
self.df = pandas.read_csv(self.dest, index_col=['frame'])
self.df_lock = Lock()
def get_frames(self, start_frame, end_frame):
frames_completed = 0
capture = cv2.VideoCapture(self.stream.url)
capture.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
old_ms = capture.get(cv2.CAP_PROP_POS_MSEC)
while frames_completed < end_frame - start_frame:
check = capture.grab()
if not check:
break
if (ms := capture.get(cv2.CAP_PROP_POS_MSEC)) // 1000 != old_ms // 1000: # no more than one queue put every second
old_ms = ms
if abs(ms % 1000 - 1000) < self.ms_per_frame or abs(ms % 1000) < self.ms_per_frame:
_, frame = capture.retrieve()
self._q.put((int(ms / 1000 * self.fps), frame))
frames_completed += 1
capture.release()
def ocr_frame(self, frame):
for _ in range(2):
try:
formatted_string = frame.get_str_from_cropped()
return formatted_string
except CroppedOcrError:
frame.update_bbox()
return ""
def format_from_str(self, frame, string, recurse=False):
for _ in range(2):
try:
ts, string = LudFrame.get_timestamp_from_str(string)
return ts, string
except TimeStringParsingError:
frame.update_bbox()
return -1, ""
def process_frame(self, idx, frame):
frame = LudFrame(frame, self.record_ims)
raw_ocr_string = self.ocr_frame(frame)
ts, string = self.format_from_str(frame, raw_ocr_string)
with self.df_lock:
self.df.loc[idx, :] = [seconds_to_string(idx / self.fps), ts, string]
self.df.to_csv(self.dest)
with self.lines_completed_lock:
self.lines_completed += 1
def go(self, download_workers=3, processing_workers=2, start_frac=0):
# https://stackoverflow.com/questions/41648103/how-would-i-go-about-using-concurrent-futures-and-queues-for-a-real-time-scenari
with cf.ThreadPoolExecutor(max_workers=download_workers+processing_workers) as executor:
self.lines_completed = self.frames * start_frac / self.fps
self.lines_completed_lock = Lock()
start_time = time.time()
get_frames_per_fut = ceil(self.frames / download_workers)
futs = [
executor.submit(
self.get_frames,
int( (i+start_frac) * get_frames_per_fut + self.frames_offset),
int( (i+1 ) * get_frames_per_fut + self.frames_offset)
) for i in range(download_workers)
]
while futs:
try:
done, _ = cf.wait(futs, timeout=1, return_when=cf.FIRST_COMPLETED)
while not self._q.empty() and len(futs) < download_workers + processing_workers:
idx, frame = self._q.get(True)
futs.append(executor.submit(self.process_frame, idx, frame))
for future in done:
futs.remove(future)
with self.lines_completed_lock:
frac_done = self.lines_completed / (self.frames / self.fps)
elapsed = time.time() - start_time
if frac_done != start_frac:
time_remaining = round( (1 - frac_done - start_frac) * elapsed / (frac_done - start_frac) / 3600, 5 )
else:
time_remaining = None
os.system('clear')
print(
f"""frac_done == {frac_done},\
len(futs) == {len(futs)},\
self._q.qsize() == {self._q.qsize()},\
ETA == {seconds_to_string(time_remaining*3600) if time_remaining is not None else 'N/A'}"""
)
except KeyboardInterrupt:
break
print('done')
```
#### File: Ludwig-Ahgren-Subathon-OCR/src/image_processing.py
```python
from __future__ import annotations
__doc__ = """
Process a single frame of the stream and get the timer value.
"""
import os
import re
from datetime import datetime
from math import isclose
from typing import Iterable, Optional, Tuple
import cv2
import numpy as np
import pytesseract
class CroppedOcrError(Exception):
pass
class TimeStringParsingError(Exception):
pass
class LudFrame():
TIME_REGEX = re.compile(r'(\d{1,2})[:\.]?(\d{2})[:\.]?(\d{2})')
def __init__(self, arr: np.ndarray, record_ims=False, bbox=None):
self.record_ims = record_ims
self.img = arr
self.bbox = bbox # [x, y, w, h]
self.cropped = None
self._write("image.png", arr)
def _write(self, name, im):
if self.record_ims:
cv2.imwrite(os.path.join("demo_images", name), im)
@classmethod
    def get_timestamp_from_str(cls, s: str) -> Tuple[int, str]:
cleaned = "".join(s.split()) # no whitespace
res = cls.TIME_REGEX.findall(cleaned)
if len(res) != 1:
raise TimeStringParsingError(f"cleaned: {cleaned}")
hour, minute, second = res[0]
timestamp = 60 * 60 * int(hour) + 60 * int(minute) + int(second)
return timestamp, ":".join(res[0])
def get_str_from_cropped(self) -> str:
"""
Convert a cropped image to a timestamp-convertible string.
grayscale -> threshold -> filter out small shapes -> inversion -> tesseract OCR
"""
if self.cropped is None:
raise CroppedOcrError
im_size = self.cropped.shape[0] * self.cropped.shape[1]
min_area_thresh = (im_size)**0.5 / 5
if min_area_thresh < 12:
min_area_thresh = 4
try:
grayscale = cv2.cvtColor(self.cropped, cv2.COLOR_BGR2GRAY)
except cv2.error:
raise CroppedOcrError
self._write('grayscale.png', grayscale)
thresh = 80
grayscale[grayscale < thresh] = 0 # TODO
grayscale[grayscale >= thresh] = 255
black_white = grayscale
self._write('black_white.png', black_white)
# https://stackoverflow.com/questions/42798659/how-to-remove-small-connected-objects-using-opencv
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(black_white, connectivity=8)
sizes = stats[1:, -1]
filtered = np.zeros(output.shape, np.uint8)
#for every component in the image, you keep it only if it's above min_size
for i, size in enumerate(sizes):
if size >= min_area_thresh:
filtered[output == i + 1] = 255
self._write('filtered.png', filtered)
inverted = cv2.bitwise_not(filtered)
self._write('inverted.png', inverted)
string = pytesseract.image_to_string(inverted, 'eng', config='-c tessedit_char_whitelist=0123456789:.')
return string
def update_bbox(
self,
color: Iterable[int] = [67, 12, 21],
color_threshold: int = 15,
max_perimeter_tolerance: float = 0.20,
min_area_factor: float = 0.01,
morph_close_len: int = 3,
border_width: int = 5
):
"""
Get the timer portion of the frame.
inRange mask -> Canny edge detection -> morphological dilate to complete path ->
approximate rectangular contour with large area that fits well in its bounding box -> coordinates of bounding box
"""
color = np.array(color) # BGR
min_color, max_color = color - color_threshold, color + color_threshold
mask = cv2.inRange(self.img, min_color, max_color)
self._write('mask.png', mask)
canny = cv2.Canny(mask, 127, 255)
self._write('canny.png', canny)
closed = cv2.dilate(canny, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_close_len,morph_close_len)))
self._write('closed.png', closed)
contours, _ = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
approx_contours = [cv2.approxPolyDP(c, 0.1 * cv2.arcLength(c, True), True) for c in contours]
best_bounding_box = [0, 0, 0, 0] # x, y, width, height
best_peri_deviance = 1.0
im_height, im_width, *_ = self.img.shape
min_area = min_area_factor * im_height * im_width
for cont in approx_contours:
rect = cv2.boundingRect(cont)
x, y, width, height = rect
box_peri = cv2.arcLength(cont, True)
cont_peri = 2 * width + 2 * height
area = width * height
if area >= min_area \
and isclose(cont_peri, box_peri, rel_tol=max_perimeter_tolerance) \
and isclose(cont_peri, box_peri, rel_tol=best_peri_deviance) \
and not isclose(width, im_width, rel_tol=0.1) \
and not isclose(height, im_height, rel_tol=0.1):
best_bounding_box = rect
best_peri_deviance = abs(cont_peri - box_peri) / box_peri
self.bbox = best_bounding_box
self.crop()
return self.bbox
def crop(self, bounding_box=None):
bbox = bounding_box if bounding_box else self.bbox
x, y, w, h = bbox
self.cropped = self.img[y:y+h, x:x+w]
if __name__ == '__main__':
import pafy
# (3, 6, 272, 157)
#stream = pafy.new('https://www.youtube.com/watch?v=UzHtbjtT8hE').getbestvideo()
#cap = cv2.VideoCapture(stream.url)
#cap.set(1, 61320-1)
#_, frame = cap.read()
lf = LudFrame(cv2.imread('demo_images/image.png'), True)
print(lf.update_bbox())
print(lf.get_str_from_cropped())
``` |
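A quick sketch of the timer-string parsing above (assuming the module's dependencies such as cv2 and pytesseract are installed so `LudFrame` can be imported); the regex tolerates `:` or `.` separators, or none at all:

```python
print(LudFrame.get_timestamp_from_str("12:34:56"))  # (45296, '12:34:56')
print(LudFrame.get_timestamp_from_str("1.02.03"))   # (3723, '1:02:03')
print(LudFrame.get_timestamp_from_str(" 50212 "))   # (18132, '5:02:12')
```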
{
"source": "2435191/marco-rubio-bible-tweets",
"score": 4
} |
#### File: 2435191/marco-rubio-bible-tweets/main.py
```python
import twint
import re
import os
import pandas
import datetime
import matplotlib.pyplot as plt
import matplotlib as mpl
import json
# from wikipedia, map abbreviations to full bible verse names
with open('verses.json') as verses_json:
verses_map = json.load(verses_json)
def is_bible_verse(s: str) -> bool:
"""
Does a text contain reference to a part of the Bible?
example: is_bible_verse("text filler blah blah 1 Mark 23: 45-67 ") -> True
"""
re_pattern = r"((?:\d +)?[A-Za-z]+) +(\d\d?) +(\d\d?-\d\d?|\d\d?)(?: +|$)"
s = s.replace(';', ' ').replace(':', ' ')
res = re.findall(re_pattern, s, flags=re.IGNORECASE)
return bool(res)
def get_tweets() -> pandas.DataFrame:
"""
Get all of @marcorubio's tweets that have a Bible verse (or abbreviation) and put them in a DataFrame.
"""
df = pandas.DataFrame(columns=["id", "verse", "tweet", 'datetime', 'date', "link", "replies_count", "retweets_count", "likes_count"]).set_index('id')
for verse in verses_map.keys():
print(verse)
twint.output.clean_lists()
c = twint.Config()
c.Username = "marcorubio"
c.Search = verse
c.Filter_retweets = True
c.Store_object = True
twint.run.Search(c)
tweets = twint.output.tweets_list
for tweet in tweets:
df.loc[tweet.id, :] = [
verse,
tweet.tweet,
tweet.datetime,
tweet.datestamp,
tweet.link,
tweet.replies_count,
tweet.retweets_count,
tweet.likes_count
]
df['verse_full'] = df['verse'].map(verses_map)
df['is_verse'] = df['tweet'].apply(is_bible_verse)
return df
def get_number_tweets_for_month(ts: datetime.datetime) -> int:
"""
Get the total number of tweets that @marcorubio made in `ts.month`.
TODO: Possibly approximate, maybe splitting into two tasks (two weeks each) would fix.
"""
c = twint.Config()
c.Username = "marcorubio"
c.Filter_retweets = True
c.Since = ts.replace(day=1).strftime('%Y-%m-%d')
# yeah this part's a bit janky, especially the day=2 bit. Don't know of a fix, and returns too few results otherwise
    c.Until = ts.replace(day=2, month=ts.month % 12 + 1, year=ts.year + 1 if ts.month == 12 else ts.year).strftime('%Y-%m-%d')
c.Store_object = True
twint.run.Search(c)
lst = twint.output.tweets_list
twint.output.clean_lists()
return len(lst)
def group(df: pandas.DataFrame) -> pandas.DataFrame:
"""
Do some cleaning operations and get the total number of tweets for each month.
"""
df['date'] = pandas.to_datetime(df['date'])
df.reset_index(inplace=True)
df = df['tweet'].groupby(df['date'].dt.to_period('M')).count().resample('M').asfreq().fillna(0).to_frame()
df.index = df.index.to_timestamp()
df.rename({'tweet': 'bible_tweet_count'}, axis=1, inplace=True)
df['total_tweets'] = df.index.map(get_number_tweets_for_month)
return df
if __name__ == "__main__":
if os.path.exists('data.csv'):
df = pandas.read_csv('data.csv')
else:
df = get_tweets()
df.to_csv('data.csv')
grouped = group(df)
grouped.to_csv('grouped.csv')
``` |
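A couple of quick checks of the verse matcher (assuming `verses.json` and the module's other dependencies are available so it loads); `;` and `:` are normalized to spaces before the regex runs:

```python
print(is_bible_verse("Blessed morning. Romans 8:28"))   # True
print(is_bible_verse("text filler 1 Mark 23: 45-67 "))  # True (the docstring example)
print(is_bible_verse("No scripture in this tweet"))     # False
```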
{
"source": "2435191/MyAutoClicker",
"score": 3
} |
#### File: 2435191/MyAutoClicker/AutoClicker.py
```python
import rumps
from pynput import mouse, keyboard
class AutoClicker(rumps.App):
def __init__(self):
super().__init__(name="AutoClicker", icon="mouselogo.icns")
default_value = 0.0
self.cps_value = AutoClicker.map_slider_to_cps(default_value)
self.click_timer = rumps.Timer(lambda _ : self.mouse.click(mouse.Button.left), 1/self.cps_value)
def timer_start(_):
#print(rumps.timers())
self.click_timer.start()
self.menu = [
rumps.MenuItem("Start", timer_start),
None,
"Stop key: esc",
"Start key: \\",
None,
"cps_value",
rumps.SliderMenuItem(dimensions=(180,15), callback=self.change_cps, min_value = -1, max_value=5, value=default_value),
None
]
self.menu["cps_value"].title = f"CPS: {self.cps_value}"
self.mouse = mouse.Controller()
self.check_keys()
@staticmethod
def map_slider_to_cps(slide_val):
base = 2
if base**slide_val > 10:
return int(base**slide_val)
return round(base**slide_val, 2)
def change_cps(self, slider):
# map slider.value non-linearly to cps_value
self.cps_value = AutoClicker.map_slider_to_cps(slider.value)
self.menu["cps_value"].title = f"CPS: {self.cps_value}"
self.click_timer.interval = 1/self.cps_value
def check_keys(self):
def on_press(key):
if key == keyboard.Key.esc:
#print("stop")
for t in rumps.timers(): # just in case things get out of sync
t.stop()
if key == keyboard.KeyCode(char='\\'):
self.click_timer.start()
self.listener = keyboard.Listener(
on_press = on_press
)
self.listener.start()
AutoClicker().run()
``` |
{
"source": "2435191/python-getset",
"score": 3
} |
#### File: python-getset/paam/__init__.py
```python
from typing import Any, Callable, List
from abc import ABC
class _PaamBaseDescriptor(ABC):
def __init__(self, obj: Any) -> None:
self.is_setter = isinstance(obj, Setter) or isinstance(self, Setter)
self.is_getter = isinstance(obj, Getter) or isinstance(self, Getter)
self.is_deleter = isinstance(obj, Deleter) or isinstance(self, Deleter)
self._obj = obj
def __get__(self, owner_instance, owner_cls=None):
raise ValueError(
f"{owner_instance if owner_cls is None else owner_cls} is not gettable"
)
def __set__(self, owner_instance, value):
raise ValueError(
f"{owner_instance} is not settable"
)
def __delete__(self, owner_instance):
raise ValueError(
f"{owner_instance} is not deletable"
)
class Setter(_PaamBaseDescriptor):
def __set__(self, owner_instance, value):
self._obj = value
class Getter(_PaamBaseDescriptor):
def __get__(self, owner_instance, owner_cls=None):
return self._obj
class Deleter(_PaamBaseDescriptor):
def __delete__(self, owner_instance):
del self._obj
class _PropertyAccessModifierFactory:
def __init__(self, cls: type) -> None:
self.cls = cls
def __ror__(self, other: Any) -> _PaamBaseDescriptor:
if issubclass(other.__class__, _PaamBaseDescriptor):
class Both(self.cls, other.__class__):
pass
return Both(other._obj)
return self.cls(other)
SET = _PropertyAccessModifierFactory(Setter)
GET = _PropertyAccessModifierFactory(Getter)
DEL = _PropertyAccessModifierFactory(Deleter)
# read https://dev.to/mattconway1984/python-creating-instance-properties-2ej0
# for more
class PaamBase:
def __setattr__(self, attr_name: str, value: Any) -> None:
try:
attr = super().__getattribute__(attr_name) # avoid recursion this way
except AttributeError: # must be "normal" attribute
super().__setattr__(attr_name, value)
else:
if issubclass(type(attr), _PaamBaseDescriptor):
attr.__set__(self, value)
else:
super().__setattr__(attr_name, value) # if "normal" attribute
def __getattribute__(self, attr_name: str) -> Any:
attr = super().__getattribute__(attr_name)
if issubclass(type(attr), _PaamBaseDescriptor):
return attr.__get__(self, self.__class__)
return attr
def __delattr__(self, attr_name: str) -> None:
attr = super().__getattribute__(attr_name)
if issubclass(type(attr), _PaamBaseDescriptor):
attr.__delete__(self)
del attr
``` |
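A self-contained usage sketch of the modifiers defined above: attributes tagged with `GET` are readable, `SET` writable, and combining them with `|` produces a descriptor that allows both:

```python
class Account(PaamBase):
    def __init__(self, balance):
        self.balance = balance | GET          # read-only from outside
        self.owner = "anon" | GET | SET       # readable and writable

acct = Account(100)
print(acct.balance)        # 100
acct.owner = "alice"       # allowed by SET
print(acct.owner)          # alice

try:
    acct.balance = 0       # no SET modifier -> rejected
except ValueError as exc:
    print(exc)
```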
{
"source": "2435191/TracerouteVisualization",
"score": 2
} |
#### File: 2435191/TracerouteVisualization/geolocate.py
```python
import subprocess
import re
import threading
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import cartopy.crs as ccrs
import time
import requests
import json
import sys, os
import builtins
import socket
"""
https://stackoverflow.com/questions/14986490/python-change-sys-stdout-print-to-custom-print-function
On my mac, FuncAnimation causes a bunch of repeated diagnostic errors into stderr (not an Exception, mind you).
This is a hacky solution.
"""
class FilterPrint():
    def __init__(self):
        self._old_stderr = sys.stderr
    def write(self, text):
        if text == "IllegalArgumentException: Argument must be Polygonal or LinearRing":
            return
        self._old_stderr.write(text)
    def flush(self):
        self._old_stderr.flush()
sys.stderr = FilterPrint()
try:
url = sys.argv[1]
except IndexError:
url = "www.u-tokyo.ac.jp"
target_ip = socket.gethostbyname(url)
lat_lst = []
lon_lst = []
def get_coords_worker(ip, prev_thread):
global lat_lst, lon_lst
response = requests.get(f"http://ip-api.com/json/{ip}").json()
if prev_thread is not None:
prev_thread.join() # make sure coordinates are displayed in order
if response['status'] == 'fail':
return
lat, lon = response['lat'], response['lon']
lat_lst.append(lat)
lon_lst.append(lon)
return lat, lon
def plot_coords():
sp = subprocess.Popen(f"traceroute {url}", shell=True, stdout=subprocess.PIPE)
threads = []
thread_ = None
fail_counter = 0
while True:
if sp.poll() is not None or fail_counter >= 3:
break
line = sp.stdout.readline().decode('utf-8')
        re_search = re.search(r'\(((?:\d+\.?)+)\)', line)
if re_search is None:
fail_counter += 1
continue
fail_counter = 0
ip = re_search.group(1)
if ip == target_ip:
break
t = threading.Thread(target=get_coords_worker, args=[ip, thread_], daemon=True)
t.start()
thread_ = t
threads.append(t)
print('done')
anim.event_source.stop()
class LowerThresholdGeodetic(ccrs.Geodetic): # make lines look smoother
@property
def threshold(self):
return 1e3
fig = plt.figure()
ax = plt.axes(projection=ccrs.Robinson())
ax.stock_img()
line, = ax.plot([], [], c='black', marker='o', lw=0.5, markersize=2, transform=LowerThresholdGeodetic())
def init():
line.set_data([], [])
return line,
def animate(_):
line.set_data(lon_lst, lat_lst)
return line,
t = threading.Thread(target=plot_coords, daemon=True)
t.start()
anim = FuncAnimation(fig, animate, init_func=init, save_count=600)
#anim.save('japan.gif',writer='imagemagick',fps=20) # use imagemagick to crop
plt.show()
``` |
{
"source": "2439905184/pyRequestTool",
"score": 4
} |
#### File: 2439905184/pyRequestTool/post.py
```python
import requests
import json
# HTTP request testing tool
domain = ""
def read_config():
    # read the target domain from config.json
    with open("config.json") as file:
        json_data = json.load(file)
    return json_data['domain']
# For local testing, use phpstudy to serve the domain "student" on port 1234
print("请选择是否使用config.json中的配置信息, y/n")
use_config = input()
if use_config == "y":
domain = read_config()
else:
print("请输入临时域名参数如: http://test:1234/")
domain = input()
print("是否启用附加参数, y/n")
enable_param = input()
if enable_param == "y":
print("请输入附加参数。格式:@p 参数名 参数值 每个命令直接一个空格")
print("输入完成输入 @finish 来结束设置")
result_params = []
while(True):
param = input()
if param != "@finish":
split_param = param.split(" ")
print(split_param)
params = {}
params['param'] = split_param[1]
params['value'] = split_param[2]
result_params.append(params)
if param == "@finish":
break
params_data = {}
print("每一个参数")
for item in result_params:
print(item)
        key = item['param']  # parameter name
        value = item['value']  # parameter value
        # assemble the final POST payload
params_data[key] = item['value']
print("附加参数为:")
print(params_data)
response = requests.post(domain,data=params_data)
print("结果")
print(response.text)
else:
response = requests.post(domain)
print(response.text)
``` |
{
"source": "2443391447/nonebot2",
"score": 2
} |
#### File: src/plugins/sjsy.py
```python
from nonebot import on_keyword, on_command
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import Message, Bot, Event  # these two look unused, but do not delete them
from nonebot.adapters.cqhttp.message import MessageSegment
import requests
from nonebot.permission import *
from nonebot.rule import to_me
from aiocqhttp.exceptions import Error as CQHttpError
sheying = on_keyword({'随机摄影'})
@sheying.handle()
async def main(bot: Bot, event: Event, state: T_State):
msg = await downloads()
try:
await sheying.send(message=Message(msg))
except CQHttpError:
pass
async def downloads():
url = "https://yanghanwen.xyz/tu/ren.php"
resp = requests.get(url).json()
url_ing = resp['data']
xians = f"[CQ:image,file={url_ing}]"
return xians
# await xians.send("正在爬取图片,请稍后……")
# await xians.send(MessageSegment.at(id) + xians + "精选摄影")
``` |
{
"source": "2448845600/easy_pytorch_yolov1",
"score": 3
} |
#### File: easy_pytorch_yolov1/data/dataset.py
```python
import os
import sys
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import cv2
class YOLODataset(data.Dataset):
image_size = 224
def __init__(self, root, list_file, train, transform):
print("数据初始化")
self.root = root
self.train = train
        self.transform = transform  # transforms applied to each image
self.fnames = []
self.boxes = []
self.labels = []
self.mean = (123, 117, 104)
with open(list_file) as f:
lines = f.readlines()
        # iterate over every line of voc2012train.txt
        # format: name num_faces x y x y c ......
        # after parsing, each index maps to one set of boxes and labels
for line in lines:
splited = line.strip().split()
self.fnames.append(splited[0])
            num_faces = int(splited[1])  # number of ground-truth boxes in this image
box = []
label = []
for i in range(num_faces):
x = float(splited[2 + 5 * i])
y = float(splited[3 + 5 * i])
x2 = float(splited[4 + 5 * i])
y2 = float(splited[5 + 5 * i])
c = splited[6 + 5 * i]
box.append([x, y, x2, y2])
label.append(int(c) + 1)
self.boxes.append(torch.Tensor(box))
self.labels.append(torch.LongTensor(label))
        self.num_samples = len(self.boxes)  # total number of images in the dataset
def __getitem__(self, idx):
fname = self.fnames[idx]
        img = cv2.imread(os.path.join(self.root, fname))
        # clone() makes a deep copy that does not share memory
        # fetch the bounding boxes and class labels for this index
boxes = self.boxes[idx].clone()
labels = self.labels[idx].clone()
# Todo
        # if this is the training set, apply data augmentation
# if self.train:
# img, boxes = self.random_flip(img, boxes)
h, w, _ = img.shape
        boxes /= torch.Tensor([w, h, w, h]).expand_as(boxes)  # normalize the coordinates by the image width and height
img = self.BGR2RGB(img)
img = self.subMean(img, self.mean)
img = cv2.resize(img, (self.image_size, self.image_size))
target = self.encoder(boxes, labels) # 7x7x30
        for t in self.transform:  # apply image transforms
img = t(img)
return img, target
def __len__(self):
return self.num_samples
def encoder(self, boxes, labels):
"""
        Encode the box information of one image into a (7, 7, 30) target tensor.
        :param boxes: (tensor) [[x1,y1,x2,y2],[x1,y1,x2,y2],...]; note the values are already normalized to 0-1.
        :param labels: (tensor) [...]
        :return: 7x7x30 tensor; x, y become the box center offset from the top-left corner of its cell, while w, h stay relative to the whole image.
"""
target = torch.zeros((7, 7, 30))
cell_size = 1. / 7
wh = boxes[:, 2:] - boxes[:, :2]
cxcy = (boxes[:, 2:] + boxes[:, :2]) / 2
        for i in range(cxcy.size()[0]):  # cxcy.size()[0] is the number of boxes
            cxcy_sample = cxcy[i]  # take one box
            # cxcy_sample is the object's center; find which cell of the 7x7 grid it falls into
            # cxcy_sample lies in 0-1, so divide by cell_size (i.e. multiply by 7) to map it to 0-7
            # ij has length 2 and names the cell (out of 7x7) responsible for predicting this object
            ij = (cxcy_sample / cell_size).ceil() - 1  # ceil rounds up
            # set entries 4 and 9 of the row to 1, i.e. both candidate boxes of this cell
            # are marked as containing an object with probability 1
            # xml coordinates: the image's top-left corner is the origin, x runs right and y runs down,
            # while a 2-D matrix index means (row, column);
            # e.g. ij = (1, 2) is 1 along the x axis and 2 along the y axis,
            # which is matrix element (2, 1) (0-based: row 2, column 1)
            target[int(ij[1]), int(ij[0]), 4] = 1
            target[int(ij[1]), int(ij[0]), 9] = 1
            target[int(ij[1]), int(ij[0]), int(labels[i]) + 9] = 1
            # cxcy_sample: center of the i-th bbox
            # cell_lt_xy: top-left corner of the matching cell, relative to the image size (224*224), in 0-1
            # delta_xy: box center relative to the top-left corner of its cell
            # cxcy_sample - cell_lt_xy is at most 1/7 (otherwise box and cell would not match),
            # so dividing by cell_size stretches it to 0-1, presumably to help convergence
            cell_lt_xy = ij * cell_size
            delta_xy = (cxcy_sample - cell_lt_xy) / cell_size
            # x, y are the box center relative to its cell; w, h stay relative to the whole image
target[int(ij[1]), int(ij[0]), 2:4] = wh[i]
target[int(ij[1]), int(ij[0]), :2] = delta_xy
target[int(ij[1]), int(ij[0]), 7:9] = wh[i]
target[int(ij[1]), int(ij[0]), 5:7] = delta_xy
return target
def BGR2RGB(self, img):
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def subMean(self, bgr, mean):
mean = np.array(mean, dtype=np.float32)
bgr = bgr - mean
return bgr
def random_flip(self, im, boxes):
'''
        Randomly flip the image and its boxes horizontally (50% chance).
'''
if random.random() < 0.5:
im_lr = np.fliplr(im).copy()
h, w, _ = im.shape
xmin = w - boxes[:, 2]
xmax = w - boxes[:, 0]
boxes[:, 0] = xmin
boxes[:, 2] = xmax
return im_lr, boxes
return im, boxes
```
#### File: easy_pytorch_yolov1/model/yolo.py
```python
import torch.nn as nn
from model.net import vgg16_bn
'''
VGG16 takes 224*224*3 input while YOLO takes 448*448*3, so vgg16_bn().features outputs 14*14*512.
An extra pooling layer nn.AdaptiveAvgPool2d((7, 7)) is therefore added, and the final YOLO output is still 7*7*30.
'''
class YOLO(nn.Module):
def __init__(self, num_classes=20, init_weights=True):
super(YOLO, self).__init__()
self.basic_net = vgg16_bn().features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))  # adaptive average pooling
self.detector_head = nn.Sequential(
nn.Linear(7 * 7 * 512, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 7 * 7 * (2 * 5 + num_classes))
)
self.init_head()
def init_head(self):
for layer in self.detector_head:
if isinstance(layer, nn.Linear):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, input):
x = self.basic_net(input)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
output = self.detector_head(x)
return output
if __name__ == '__main__':
yolo = YOLO()
print(yolo)
```
#### File: easy_pytorch_yolov1/util/iou.py
```python
import torch
def compute_iou(box1, box2):
"""
    Compute the intersection over union of two sets of boxes, where each box is [x1,y1,x2,y2].
:param box1: (tensor) bounding boxes, sized [N,4].
:param box2: (tensor) bounding boxes, sized [M,4].
:return: (tensor) iou, sized [N,M].
"""
N = box1.size(0)
M = box2.size(0)
lt = torch.max(
box1[:, :2].unsqueeze(1).expand(N, M, 2), # [N,2] -> [N,1,2] -> [N,M,2]
box2[:, :2].unsqueeze(0).expand(N, M, 2), # [M,2] -> [1,M,2] -> [N,M,2]
)
rb = torch.min(
box1[:, 2:].unsqueeze(1).expand(N, M, 2), # [N,2] -> [N,1,2] -> [N,M,2]
box2[:, 2:].unsqueeze(0).expand(N, M, 2), # [M,2] -> [1,M,2] -> [N,M,2]
)
wh = rb - lt # [N,M,2]
wh[wh < 0] = 0 # clip at 0
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]) # [N,]
area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1]) # [M,]
area1 = area1.unsqueeze(1).expand_as(inter) # [N,] -> [N,1] -> [N,M]
area2 = area2.unsqueeze(0).expand_as(inter) # [M,] -> [1,M] -> [N,M]
iou = inter / (area1 + area2 - inter)
return iou
if __name__ == '__main__':
print("test of unsqueeze and expand/expand_as")
m = torch.zeros(1)
n = torch.zeros(2, 2, 3, 5)
print("m %s" % str(m.size()))
print("n %s" % str(n.size()))
m = m.unsqueeze(1).expand(2, 2, 3)
print("\nafter m.unsqueeze(1).expand(2, 2, 3), \nm %s" % str(m.size()))
m = m.unsqueeze(-1).expand_as(n)
print("\nthen m.unsqueeze(-1).expand_as(n), \nm %s" % str(m.size()))
``` |
{
"source": "24601/accumulo",
"score": 2
} |
#### File: system/auto/JavaTest.py
```python
import os
import unittest
import time
import logging
from subprocess import PIPE
from TestUtils import TestUtilsMixin, ROOT, ROOT_PASSWORD, INSTANCE_NAME
log = logging.getLogger('test.auto')
class JavaTest(TestUtilsMixin, unittest.TestCase):
"Base class for Java Functional Test"
order = 21
testClass=""
maxRuntime = 120
def setUp(self):
handle = self.runJTest('localhost','getConfig')
out,err = handle.communicate()
log.debug(out)
log.debug(err)
assert handle.returncode==0
self.settings = TestUtilsMixin.settings.copy()
self.settings.update(eval(out))
TestUtilsMixin.setUp(self);
handle = self.runJTest(self.masterHost(),'setup')
out,err = handle.communicate()
log.debug(out)
log.debug(err)
assert handle.returncode==0
def runJTest(self,host, cmd):
return self.runClassOn(host, 'org.apache.accumulo.test.functional.FunctionalTest', ['-u',ROOT,'-p',ROOT_PASSWORD,'-i',INSTANCE_NAME,'--classname', self.testClass,'--opt', cmd])
def runTest(self):
handle = self.runJTest(self.masterHost(),'run')
self.waitForStop(handle, self.maxRuntime)
handle = self.runJTest(self.masterHost(),'cleanup')
out,err = handle.communicate()
log.debug(out)
log.debug(err)
assert handle.returncode==0
self.shutdown_accumulo()
```
#### File: auto/simple/dynamicThreadPools.py
```python
import os
import logging
import unittest
import time
import sys
from simple.readwrite import SunnyDayTest
from TestUtils import TestUtilsMixin
log = logging.getLogger('test.auto')
class DynamicThreadPools(SunnyDayTest):
'Verify we can change thread pool sizes after the servers have run'
order = 50
settings = SunnyDayTest.settings.copy()
settings.update({
'tserver.compaction.major.delay': 1,
})
def setUp(self):
TestUtilsMixin.setUp(self);
# initialize the database
self.createTable('test_ingest')
# start test ingestion
log.info("Starting Test Ingester")
self.ingester = self.ingest(self.masterHost(),
self.options.rows*10,
size=self.options.size)
def runTest(self):
self.waitForStop(self.ingester, self.waitTime())
# make a bunch of work for compaction
out, err, returncode = self.shell(self.masterHost(),
'clonetable test_ingest test_ingest1\n'
'clonetable test_ingest test_ingest2\n'
'clonetable test_ingest test_ingest3\n'
'clonetable test_ingest test_ingest4\n'
'clonetable test_ingest test_ingest5\n'
'clonetable test_ingest test_ingest6\n'
'config -s tserver.compaction.major.concurrent.max=1\n'
'sleep 10\n'
'compact -p .*\n'
'sleep 2\n')
log.info('shell says: ' + out)
count = 0
while count == 0:
handle = self.runOn(self.masterHost(), [self.accumulo_sh(), 'org.apache.accumulo.test.GetMasterStats', '-u', 'root'])
out, err = self.waitForStop(handle, 120)
for line in out.split('\n'):
if line.find('Major Compacting') >= 0:
count += int(line.split()[2])
self.assert_(count == 1)
def suite():
result = unittest.TestSuite()
result.addTest(DynamicThreadPools())
return result
```
#### File: auto/simple/masterFailover.py
```python
from simple.readwrite import SunnyDayTest
import logging
log = logging.getLogger('test.auto')
import unittest
class MasterFailover(SunnyDayTest):
"Test master automatic master fail-over"
order = 85
def start_master(self, host, safeMode=None):
goalState = 'NORMAL'
if safeMode:
goalState = 'SAFE_MODE'
self.wait(self.runOn('localhost',
[self.accumulo_sh(),
'org.apache.accumulo.server.master.state.SetGoalState',
goalState]))
        return self.runOn(host, [self.accumulo_sh(), 'master', 'doomed'])
def runTest(self):
waitTime = self.waitTime()
self.waitForStop(self.ingester, waitTime)
handle = self.start_master(self.masterHost())
self.pkill(self.masterHost(), 'doomed')
self.sleep(2)
self.shutdown_accumulo()
def suite():
result = unittest.TestSuite()
result.addTest(MasterFailover())
return result
```
#### File: auto/simple/start.py
```python
import unittest
import os
from TestUtils import TestUtilsMixin, ROOT, ROOT_PASSWORD, ACCUMULO_DIR
from subprocess import PIPE
class Start(TestUtilsMixin, unittest.TestCase):
order = 21
def start(self, *args):
handle = self.runOn(self.masterHost(),
[self.accumulo_sh(), 'org.apache.accumulo.start.TestMain'] + list(args), stdin=PIPE)
out, err = handle.communicate('')
return handle.returncode
def runTest(self):
assert self.start() != 0
assert self.start('success') == 0
assert self.start('exception') != 0
def suite():
result = unittest.TestSuite()
result.addTest(Start())
return result
```
#### File: auto/stress/halfDead.py
```python
import os
import time
import signal
import unittest
from simple.readwrite import SunnyDayTest
from TestUtils import ACCUMULO_HOME
import logging
log = logging.getLogger('test.auto')
class TabletServerHangs(SunnyDayTest):
order = 25
# connections should timeout quickly for faster tests
settings = SunnyDayTest.settings.copy()
settings['general.rpc.timeout'] = '5s'
settings['instance.zookeeper.timeout'] = '15s'
def start_tserver(self, host):
log.info("Starting tserver we can pause with bad read/writes")
libpath = '%s/test/system/auto/fake_disk_failure.so' % ACCUMULO_HOME
os.environ['LD_PRELOAD'] = libpath
os.environ['DYLD_INSERT_LIBRARIES'] = libpath
os.environ['DYLD_FORCE_FLAT_NAMESPACE'] = 'true'
self.stop = self.runOn(self.masterHost(), [self.accumulo_sh(), 'tserver'])
del os.environ['LD_PRELOAD']
del os.environ['DYLD_FORCE_FLAT_NAMESPACE']
del os.environ['DYLD_INSERT_LIBRARIES']
self.flagFile = os.getenv("HOME") + "/HOLD_IO_%d" % self.stop.pid
log.debug("flag file is " + self.flagFile)
return self.stop
def runTest(self):
waitTime = self.waitTime()
log.info("Waiting for ingest to stop")
self.waitForStop(self.ingester, waitTime)
MANY_ROWS = 500000
self.ingester = self.ingest(self.masterHost(),
MANY_ROWS,
size=self.options.size)
# wait for the ingester to get going
for i in range(100):
line = self.ingester.stdout.readline()
if line == '' or line.find(' sent ') > 0:
break
log.info("Starting faking disk failure for tserver")
fp = open(self.flagFile, "w+")
fp.close()
self.sleep(10)
log.info("Ending faking disk failure for tserver")
os.unlink(self.flagFile)
# look for the log message that indicates a timeout
out, err = self.waitForStop(self.ingester, waitTime)
self.assert_(out.find('requeuing') >= 0)
log.info("Verifying Ingestion")
self.waitForStop(self.verify(self.masterHost(),
MANY_ROWS,
size=self.options.size),
waitTime)
os.kill(self.stop.pid, signal.SIGHUP)
# look for the log message that indicates the tablet server stopped for a while
out, err = self.stop.communicate()
self.assert_(err.find('sleeping\nsleeping\nsleeping\n') >= 0)
def tearDown(self):
SunnyDayTest.tearDown(self)
try:
os.unlink(self.flagFile)
except:
pass
def suite():
result = unittest.TestSuite()
result.addTest(TabletServerHangs())
return result
```
#### File: auto/stress/migrations.py
```python
import os
import logging
import unittest
import time
import sleep
from TestUtils import TestUtilsMixin, ACCUMULO_DIR
log = logging.getLogger('test.auto')
from simple.readwrite import SunnyDayTest, Interleaved
from simple.delete import DeleteTest
class ChaoticBalancerIntegrity(SunnyDayTest):
"""Start a new table, create many splits, and attempt ingest while running a crazy load balancer"""
order = 90
settings = TestUtilsMixin.settings.copy()
settings.update({
'tserver.memory.maps.max':'10K',
'tserver.compaction.major.delay': 0,
'table.balancer':'org.apache.accumulo.server.master.balancer.ChaoticLoadBalancer',
})
tableSettings = SunnyDayTest.tableSettings.copy()
tableSettings['test_ingest'] = {
'table.split.threshold': '10K',
}
def setUp(self):
# ensure we have two servers
if len(self.options.hosts) == 1:
self.options.hosts.append('localhost')
self.options.hosts = self.options.hosts[:2]
TestUtilsMixin.setUp(self);
# create a table with 200 splits
import tempfile
fileno, filename = tempfile.mkstemp()
fp = os.fdopen(fileno, "wb")
try:
for i in range(200):
fp.write("%08x\n" % (i * 1000))
finally:
fp.close()
self.createTable('unused', filename)
# create an empty table
self.createTable('test_ingest')
def runTest(self):
# start test ingestion
log.info("Starting Test Ingester")
self.ingester = self.ingest(self.masterHost(),
200000,
size=self.options.size)
self.waitForStop(self.ingester, 120)
self.shell(self.masterHost(), 'flush -t test_ingest')
self.waitForStop(self.verify(self.masterHost(), self.options.rows), 60)
def suite():
result = unittest.TestSuite()
result.addTest(ChaoticBalancerIntegrity())
return result
```
#### File: bench/lib/TableSplitsBenchmark.py
```python
import unittest
import subprocess
import os
import glob
import random
import time
from lib import cloudshell, runner, path
from lib.Benchmark import Benchmark
from lib.slaves import runEach, slaveNames
from lib.path import accumulo, accumuloJar
from lib.util import sleep
from lib.options import log
class TableSplitsBenchmark(Benchmark):
"Creating a table with predefined splits and then deletes it"
splitsfile = 'slowsplits'
tablename = 'test_splits'
def setUp(self):
# Need to generate a splits file for each speed
code, out, err = cloudshell.run(self.username, self.password, 'table %s\n' % self.tablename)
if out.find('does not exist') == -1:
log.debug('Deleting table %s' % self.tablename)
code, out, err = cloudshell.run(self.username, self.password, 'deletetable %s -f\n' % self.tablename)
self.assertEqual(code, 0, "Could not delete table")
Benchmark.setUp(self)
def runTest(self):
command = 'createtable %s -sf %s\n' % (self.tablename, self.splitsfile)
log.debug("Running Command %r", command)
code, out, err = cloudshell.run(self.username, self.password, command)
self.assertEqual(code, 0, 'Could not create table: %s' % out)
return code, out, err
def shortDescription(self):
return 'Creates a table with splits. Lower score is better.'
def tearDown(self):
command = 'deletetable %s -f\n' % self.tablename
log.debug("Running Command %r", command)
code, out, err = cloudshell.run(self.username, self.password, command)
self.assertEqual(code, 0, "Could not delete table")
log.debug("Process finished")
Benchmark.tearDown(self)
def setSpeed(self, speed):
dir = os.path.dirname(os.path.realpath(__file__))
if speed == "slow":
splitsfile = 'slowsplits'
elif speed == "medium":
splitsfile = 'mediumsplits'
else: # speed == "fast"
splitsfile = 'fastsplits'
self.splitsfile = os.path.join( dir, splitsfile)
def needsAuthentication(self):
return 1
``` |
{
"source": "2462612540/Large-file-transfer",
"score": 2
} |
#### File: v_1_0/app/file_split.py
```python
import sys
from v_1_0.utils.file_utils import *
from datetime import datetime
# set file split size
mminbytes = 1024
megabytes = mminbytes * 1000
chunksize = 1000 * megabytes
# set up logger
handler = logging.StreamHandler()
logger = logging.getLogger(__name__)
# record the original file's name and md5, plus the md5 of every chunk
file_json = {}
def file_split(splitfiledir, joindir, chunksize):
if not os.path.exists(joindir):
os.mkdir(joindir)
else:
        # if the directory already exists, remove any leftover files in it
for fname in os.listdir(joindir):
os.remove(os.path.join(joindir, fname))
    # get the original file's name and md5
splitfilename=file_name(splitfiledir)
file_json["splitfilename"]=splitfilename
file_json[splitfilename]=file_md5(splitfiledir)
    # start splitting
partnum = 0
inputfile = open(splitfiledir, 'rb')
while True:
chunk = inputfile.read(chunksize)
if not chunk:
break
partnum += 1
filename = os.path.join(joindir, ('part%04d' % partnum))
fileobj = open(filename, 'wb')
fileobj.write(chunk)
file_json[file_name(filename)] = file_md5(filename)
fileobj.close()
    # serialize the file metadata to json and write it alongside the chunks
file_json_save(file_json, joindir,splitfilename)
return partnum
if __name__ == '__main__':
abspath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
splitfile = abspath + "\\resource\\NIM_4.7.520.26691291_1602487304.iso"
joindir = abspath + "\\bigfile\\savepth\\"
try:
print("----------------------------------------开始----------------------------------------")
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
parts = file_split(splitfile, joindir, chunksize)
print("----------------------------------------完成----------------------------------------")
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
except:
print('Error during split:')
print(sys.exc_info()[0], sys.exc_info()[1])
``` |
{
"source": "249110786/viql_test_git",
"score": 4
} |
#### File: viql_test_git/test/login.py
```python
def index(num1, num2):
sum = num1 + num2
return sum
index(10, 20)
a = 'hello world'
num1 = 10
num2 = 20
num = 88
``` |
{
"source": "249606097/web",
"score": 2
} |
#### File: apps/comment/models.py
```python
from django.db import models
class Comment(models.Model):
user = models.ForeignKey('account.User', on_delete=models.PROTECT, verbose_name='用户')
content = models.CharField(max_length=200, verbose_name='内容', null=False, blank=False)
create_time = models.DateTimeField(verbose_name='时间', auto_now_add=True)
class Meta:
verbose_name = '评论'
verbose_name_plural = verbose_name
def __str__(self):
return self.user.username
```
#### File: apps/utils/response_processor.py
```python
import json
from django.http import HttpResponse
def process_response(content: dict) -> HttpResponse:
"""
    Convert a key-value dict to JSON and wrap it in an HttpResponse object.
    :param content: the dict to convert
    :return: the wrapped HttpResponse object
"""
content = json.dumps(content)
return HttpResponse(content,
content_type='application/json;charset=utf-8',
status='200',
charset='utf-8')
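# Hedged usage sketch (added for illustration): a minimal Django view built on the helper.
def example_view(request):
    return process_response({'code': '200', 'msg': 'ok'})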
```
#### File: apps/utils/validator.py
```python
import re
def validate_username(username: str) -> dict:
"""
    1. length of at least 4
    2. length of at most 20
    3. only letters, digits and underscores allowed
    :param username: the username to check
    :return: a status-code dict (empty when valid)
"""
if len(username) < 4:
return {'code': '102', 'msg': '用户名应不少于 4 位'}
if len(username) > 20:
return {'code': '103', 'msg': '用户名应不多于 20 位'}
if re.search(r'[^A-Za-z0-9_]', username):
return {'code': '104', 'msg': '用户名仅能含有字母、数字和下划线'}
return {}
def validate_password(password: str) -> dict:
"""
    1. length of at least 6
    2. length of at most 20
    3. only printable ASCII characters 33 ~ 126 allowed
    4. must contain a digit
    5. must contain a letter
    :param password: the password to check
    :return: a status-code dict (empty when valid)
"""
if len(password) < 6:
return {'code': '112', 'msg': '密码应不少于 6 位'}
if len(password) > 20:
return {'code': '113', 'msg': '密码应不多于 20 位'}
if not all(32 < ord(c) < 128 for c in password):
return {'code': '114', 'msg': '密码应仅包含合法字符'}
if not re.search(r'[0-9]', password):
return {'code': '115', 'msg': '密码必须包含数字'}
if not re.search(r'[A-Za-z]', password):
return {'code': '116', 'msg': '密码必须包含字母'}
return {}
def validate_email(email: str) -> dict:
"""
    1. malformed email address
    :param email: the email address to check
    :return: a status-code dict (empty when valid)
"""
    # the original character class [com,cn,net]{1,3} matched arbitrary mixes of those letters;
    # an alternation expresses the intended top-level domains
    if not re.search(r'^[0-9a-zA-Z_]{0,19}@[0-9a-zA-Z]{1,13}\.(com|cn|net)$', email):
return {'code': '211', 'msg': '邮箱格式错误'}
return {}
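# Hedged usage sketch (added for illustration); an empty dict means the value passed validation.
if __name__ == '__main__':
    print(validate_username('ab'))             # too short -> {'code': '102', ...}
    print(validate_password('abc12345'))       # valid -> {}
    print(validate_email('user@example.com'))  # valid -> {}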
``` |
{
"source": "24aitor/NicePrinter",
"score": 3
} |
#### File: src/NicePrinter/niceprinter.py
```python
import os
class Color:
underline = '\033[4m'
darkcyan = '\033[36m'
purple = '\033[95m'
yellow = '\033[93m'
green = '\033[92m'
cyan = '\033[96m'
blue = '\033[94m'
bold = '\033[1m'
red = '\033[91m'
end = '\033[0m'
def underline(string):
return Color.underline + string + Color.end
def darkcyan(string):
return Color.darkcyan + string + Color.end
def purple(string):
return Color.purple + string + Color.end
def yellow(string):
return Color.yellow + string + Color.end
def green(string):
return Color.green + string + Color.end
def cyan(string):
return Color.cyan + string + Color.end
def blue(string):
return Color.blue + string + Color.end
def bold(string):
return Color.bold + string + Color.end
def red(string):
return Color.red + string + Color.end
def colorsChars(string):
counter = 0
chars = 0
while (counter < len(string)) and (counter != -1):
counter = string.find('\033', counter)
if counter != -1:
chars += len(string[counter:string.find('m', counter)]) + 1
counter += 1
return chars
def center(string):
overflow = colorsChars(string)
return string.center(os.get_terminal_size().columns + (overflow))
def hr(percentage = 75, sym = '-'):
cols = (percentage/100) * os.get_terminal_size().columns
return center(sym * int(cols))
def box(string, hsym = '-', vsym = '|'):
line = (len(string) + 4 - colorsChars(string)) * hsym
return line + '\n' + vsym + ' ' + string + ' ' + vsym + '\n' + line
def bbox(string):
return box(string, '=', '‖')
def cbox(string, hsym = '-', vsym = '|'):
line = center((len(string) + 4 - colorsChars(string)) * hsym)
return line + '\n' + center(vsym + ' ' + string + ' ' + vsym) + '\n' + line
def cbbox(string):
return cbox(string, '=', '‖')
def title(string, percentage = 75, sym = '-'):
return '\n' + hr(percentage, sym) + '\n' + center(string) + '\n' + hr(percentage, sym) + '\n'
def listing(head, array, vsym = '|', hsym = '=', centered = False, boldHead = True):
array.insert(0, head)
size = 0
export = ''
array2 = []
[array2.append(str(val)) for ind, val in enumerate(array)]
array = array2
for index, value in enumerate(array):
if ((len(value) - colorsChars(value)) > size):
size = len(value) - colorsChars(value)
bar = (hsym * (size + 4))
export += center(bar) if (centered) else (bar)
var = vsym
for index, value in enumerate(array):
var = (vsym + ' ') if (centered) else ("\n" + vsym + ' ')
val = bold(value) if (index == 0 and boldHead) else value
var += (val + (' ' * (size - len(value) + colorsChars(value) )) + (' ' + vsym))
export += center(var) if (centered) else (var)
if ((index == 0)):
export += '\n'
export += center(bar) if (centered) else (bar)
export += '\n'
export += center(bar) if (centered) else (bar)
return export
def table(array, vsym = '|', hsym = '=', centered = False, boldHead = True):
sizes = []
export = ''
[sizes.append(0) for x in array[0]]
for subarray in array:
for index, value in enumerate(subarray):
if ((len(value) - colorsChars(value)) > sizes[index]):
sizes[index] = len(value) - colorsChars(value)
bar = (hsym * (sum(sizes) + len(sizes)*3 + 1))
for subindex, subarray in enumerate(array):
if subindex == 0:
export += center(bar) if (centered) else (bar)
export += ('\n')
var = vsym + ' '
for index, value in enumerate(subarray):
val = bold(value) if (subindex == 0 and boldHead) else value
var += (val + (' ' * (sizes[index] - len(value) + colorsChars(value) )) + (' ' + vsym + ' '))
var = var.strip()
export += center(var) if (centered) else (var)
if ((subindex == 0)):
export += '\n'
export += center(bar) if (centered) else (bar)
export += '\n'
export += center(bar) if (centered) else (bar)
return export
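# Hedged demo (added for illustration; center() queries the terminal size, so run this in a real terminal):
if __name__ == '__main__':
    print(cbox(bold('NicePrinter')))
    print(table([['Name', 'Age'], ['Ada', '36'], ['Alan', '41']]))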
``` |
{
"source": "24apurv/django-quiz",
"score": 3
} |
#### File: quiz/scripts/reviews.py
```python
from django.core.mail import EmailMessage
from quiz.models import MyUser, UserAnswer, Answer
from docx import Document
from docx.shared import Inches
def run():
'''
This script creates an analysis of the answers given by the participants and mails it to them.
The analysis contains question, options, given answer and actual answer.
'''
userAnswers = UserAnswer.objects.all()
try:
for userAnswer in userAnswers:
document = Document()
document.add_heading('MCQ Review', 0)
questions = userAnswer.questions.split(',')
answers = userAnswer.answers.split(',')
for i,question_id in enumerate(questions):
question = Answer.objects.get(question_id=question_id)
document.add_heading('Question '+str(i+1)+'. '+str(question.question), level=3)
document.add_paragraph('')
document.add_paragraph('A. '+question.option_a)
document.add_paragraph('B. '+question.option_b)
document.add_paragraph('C. '+question.option_c)
document.add_paragraph('D. '+question.option_d)
document.add_paragraph('Correct answers : '+question.correct_answer)
document.add_paragraph('Your answer : '+answers[i])
document.add_page_break()
document.save('review.docx')
email_id = []
if userAnswer.user.email_2:
email_id = [userAnswer.user.email_1, userAnswer.user.email_2]
else:
email_id = [userAnswer.user.email_1]
email = EmailMessage(
'Review for Quiz',
'PFA file containing review for MCQ Quiz',
'Team Django-Quiz',
email_id,
)
email.attach_file('review.docx')
email.send(fail_silently=False)
except:
pass
``` |
{
"source": "24doeway/advent_2020",
"score": 3
} |
#### File: 24doeway/advent_2020/day16.py
```python
class Part1:
def __init__(self):
with open("day16.txt") as f:
self.data = [i.strip() for i in f.readlines()]
self.constraints = {}
self.valid = {}
for line in self.data:
if "your ticket" in line:
break
parts = line.split(':')
if len(parts) == 2:
#print(parts)
key = parts[0].strip()
valid = set()
valids = parts[1].split('or')
for v in valids:
bits = [int(i) for i in v.split('-')]
for i in range(bits[0], bits[1]+1):
valid.add(i)
self.valid[i] = 1
self.constraints[key] = valid
section = 1
self.tickets = []
for line in self.data:
if not line:
continue
if "your ticket" in line:
section = 2
continue
if "nearby tickets" in line:
section = 3
continue
if section == 2:
bits = line.split(',')
self.your_ticket = [int(i) for i in line.split(',')]
if section == 3:
t = [int(i) for i in line.split(',')]
self.tickets.append(t)
def run(self):
total = 0
#print (self.valid)
#print(self.tickets)
for t in self.tickets:
for n in t:
if n not in self.valid:
total += n
print(total)
def run2(self):
self.valid_tickets = []
for t in self.tickets:
is_valid = True
for n in t:
if n not in self.valid:
is_valid = False
if is_valid:
self.valid_tickets.append(t)
# Look at each field
ranges = []
for i in range(len(self.tickets[0])):
nums = set()
for t in self.valid_tickets:
nums.add(t[i])
ranges.append(nums)
print(ranges)
candidates = []
for nums in ranges:
candidate = []
for k, v in self.constraints.items():
if not (nums - v):
candidate.append(k)
candidates.append(candidate)
print(candidates)
while True:
removed = False
for c in candidates:
if len(c) == 1:
remove = c[0]
for d in candidates:
if remove in d:
if len(d) > 1:
d.remove(remove)
removed = True
if not removed:
break
print(candidates)
for c in candidates:
print(len(c))
print (len(self.tickets[0]))
print (len(self.valid_tickets))
dep_fields = []
for i, c in enumerate(candidates):
print (i,c)
if "departure" in c[0]:
dep_fields.append(i)
print(dep_fields)
prod = 1
for i in dep_fields:
prod *= self.your_ticket[i]
print(prod)
p = Part1()
p.run()
p.run2()
```
#### File: 24doeway/advent_2020/day8.py
```python
line_num = 0
total = 0
visited = {}
with open("day8.txt") as f:
data = f.readlines()
def nop(num):
global line_num
line_num += 1
def jmp(num):
global line_num
line_num += num
def acc(num):
global total
global line_num
total += num
line_num += 1
cmds = {'nop':nop,
'jmp':jmp,
'acc':acc}
while True:
if line_num in visited:
break
visited[line_num] = 1
line = data[line_num]
parts = line.split()
cmd = parts[0]
num = int(parts[1])
cmds[cmd](num)
print (cmd, num, total)
print(total)
``` |
{
"source": "24HeuresINSA/pass-checker",
"score": 2
} |
#### File: src/pass/admin.py
```python
from django.contrib import admin
from .models import AccessPoint, TimeSlot, Driver, Vehicle, Pass, Access
# Register your models here.
class PassAdmin(admin.ModelAdmin):
list_display = ('vehicle', 'drivers', 'time_slots', 'PS1', 'PS2', 'PS3', 'PS4', 'PS5', 'PS6',)
list_filter = ('allowed_access_points', 'allowed_time_slots',)
def PS1(self, obj):
return "PS1" in (o.name for o in obj.allowed_access_points.all())
def PS2(self, obj):
return "PS2" in (o.name for o in obj.allowed_access_points.all())
def PS3(self, obj):
return "PS3" in (o.name for o in obj.allowed_access_points.all())
def PS4(self, obj):
return "PS4" in (o.name for o in obj.allowed_access_points.all())
def PS5(self, obj):
return "PS5" in (o.name for o in obj.allowed_access_points.all())
def PS6(self, obj):
return "PS6" in (o.name for o in obj.allowed_access_points.all())
def time_slots(self, obj):
to_return = ""
for o in obj.allowed_time_slots.all():
to_return += " - " + o.name
return to_return
def drivers(self, obj):
to_return = ""
for o in obj.allowed_drivers.all():
to_return += " - " + o.__str__()
return to_return
PS1.boolean = True
PS1.short_description = "PS1"
PS2.boolean = True
PS2.short_description = "PS2"
PS3.boolean = True
PS3.short_description = "PS3"
PS4.boolean = True
PS4.short_description = "PS4"
PS5.boolean = True
PS5.short_description = "PS5"
PS6.boolean = True
PS6.short_description = "PS6"
admin.site.register(AccessPoint)
admin.site.register(TimeSlot)
admin.site.register(Driver)
admin.site.register(Vehicle)
admin.site.register(Pass, PassAdmin)
admin.site.register(Access)
``` |
{
"source": "24hOrBust/RisingTide",
"score": 3
} |
#### File: 24hOrBust/RisingTide/climate_model.py
```python
import pymagicc
from pymagicc import scenarios
import copy
# Takes in Gigatons Carbon Dioxide per year and returns time series of temperature
def climate_model(fossil_gtc_carbon):
data = scenarios['RCP26']['WORLD']
copy_row = data.loc[[2007]]
for index, row in data.iterrows():
if index > 2007:
data = data.drop(index)
for i in range(2008, 2101):
temp = copy.deepcopy(copy_row)
for index, row in temp.iterrows():
data.loc[i] = row.tolist()
break
# All future years have this amount of CO2
for i in range(2019, 2101):
data.at[i, "FossilCO2"] = float(fossil_gtc_carbon)
# Funnel into model and parse results
results = pymagicc.run(data)
results_df = results['SURFACE_TEMP']['GLOBAL']
ret_json = {}
ret_json['start_year'] = str(results_df.index[0])
ret_json['global_temps'] = list(results_df.values)
results_df = results['SURFACE_TEMP']['NH-LAND']
# Specific data used by the sea model
ret_json['NH_temps'] = list(results_df.values)
return ret_json
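# Hedged usage sketch (added for illustration; assumes pymagicc and its bundled
# MAGICC binary are available, and a run can take a while):
if __name__ == '__main__':
    result = climate_model(10.0)  # hold fossil CO2 at 10 GtC per year from 2019 onward
    print(result['start_year'])
    print(result['global_temps'][:5])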
``` |
{
"source": "24hoursfromtulsehill/forced_alignment",
"score": 2
} |
#### File: aligner/command_line/train_and_align.py
```python
from aligner.command_line.align import fix_path, unfix_path
import shutil
import os
import argparse
import multiprocessing as mp
import yaml
import time
from aligner import __version__
from aligner.corpus import Corpus
from aligner.dictionary import Dictionary
from aligner.aligner import TrainableAligner
from aligner.utils import no_dictionary
from aligner.config import TEMP_DIR
def align_corpus(args, skip_input=False):
if not args.temp_directory:
temp_dir = TEMP_DIR
else:
temp_dir = os.path.expanduser(args.temp_directory)
corpus_name = os.path.basename(args.corpus_directory)
if corpus_name == '':
args.corpus_directory = os.path.dirname(args.corpus_directory)
corpus_name = os.path.basename(args.corpus_directory)
data_directory = os.path.join(temp_dir, corpus_name)
conf_path = os.path.join(data_directory, 'config.yml')
if os.path.exists(conf_path):
with open(conf_path, 'r') as f:
conf = yaml.load(f)
else:
conf = {'dirty': False,
'begin': time.time(),
'version': __version__,
'type': 'train_and_align',
'corpus_directory': args.corpus_directory,
'dictionary_path': args.dictionary_path}
if getattr(args, 'clean', False) \
or conf['dirty'] or conf['type'] != 'train_and_align' \
or conf['corpus_directory'] != args.corpus_directory \
or conf['version'] != __version__ \
or conf['dictionary_path'] != args.dictionary_path:
shutil.rmtree(data_directory, ignore_errors=True)
shutil.rmtree(args.output_directory, ignore_errors=True)
os.makedirs(data_directory, exist_ok=True)
os.makedirs(args.output_directory, exist_ok=True)
try:
corpus = Corpus(args.corpus_directory, data_directory, speaker_characters=args.speaker_characters,
num_jobs=getattr(args, 'num_jobs', 3),
debug=getattr(args, 'debug', False),
ignore_exceptions=getattr(args, 'ignore_exceptions', False))
dictionary = Dictionary(args.dictionary_path, data_directory, word_set=corpus.word_set)
utt_oov_path = os.path.join(corpus.split_directory, 'utterance_oovs.txt')
if os.path.exists(utt_oov_path):
shutil.copy(utt_oov_path, args.output_directory)
oov_path = os.path.join(corpus.split_directory, 'oovs_found.txt')
if os.path.exists(oov_path):
shutil.copy(oov_path, args.output_directory)
mono_params = {'align_often': not args.fast}
tri_params = {'align_often': not args.fast}
tri_fmllr_params = {'align_often': not args.fast}
a = TrainableAligner(corpus, dictionary, args.output_directory,
temp_directory=data_directory,
mono_params=mono_params, tri_params=tri_params,
tri_fmllr_params=tri_fmllr_params, num_jobs=args.num_jobs)
a.verbose = args.verbose
a.train_mono()
a.export_textgrids()
a.train_tri()
a.export_textgrids()
a.train_tri_fmllr()
a.export_textgrids()
if args.output_model_path is not None:
a.save(args.output_model_path)
except:
conf['dirty'] = True
raise
finally:
with open(conf_path, 'w') as f:
yaml.dump(conf, f)
def align_corpus_no_dict(args, skip_input=False):
if not args.temp_directory:
temp_dir = TEMP_DIR
else:
temp_dir = os.path.expanduser(args.temp_directory)
corpus_name = os.path.basename(args.corpus_directory)
data_directory = os.path.join(temp_dir, corpus_name)
if args.clean:
shutil.rmtree(data_directory, ignore_errors=True)
shutil.rmtree(args.output_directory, ignore_errors=True)
os.makedirs(data_directory, exist_ok=True)
os.makedirs(args.output_directory, exist_ok=True)
corpus = Corpus(args.corpus_directory, data_directory, args.speaker_characters,
num_jobs=getattr(args, 'num_jobs', 3),
debug=getattr(args, 'debug', False),
ignore_exceptions=getattr(args, 'ignore_exceptions', False))
print(corpus.speaker_utterance_info())
dictionary = no_dictionary(corpus, data_directory)
mono_params = {'align_often': not args.fast}
tri_params = {'align_often': not args.fast}
tri_fmllr_params = {'align_often': not args.fast}
a = TrainableAligner(corpus, dictionary, args.output_directory,
temp_directory=data_directory,
mono_params=mono_params, tri_params=tri_params,
tri_fmllr_params=tri_fmllr_params, num_jobs=args.num_jobs, debug=args.debug)
a.verbose = args.verbose
a.train_mono()
a.export_textgrids()
a.train_tri()
a.export_textgrids()
a.train_tri_fmllr()
a.export_textgrids()
if args.output_model_path is not None:
a.save(args.output_model_path)
def validate_args(args):
if not args.no_dict and args.dictionary_path == '':
raise (Exception('Must specify dictionary or no_dict option'))
if args.no_dict and args.dictionary_path != '':
raise (Exception('Dict_path cannot be specified with no_dict option'))
if __name__ == '__main__': # pragma: no cover
mp.freeze_support()
parser = argparse.ArgumentParser()
parser.add_argument('corpus_directory', help='Full path to the source directory to align')
parser.add_argument('dictionary_path', help='Full path to the pronunciation dictionary to use', nargs='?', default='')
parser.add_argument('output_directory', help="Full path to output directory, will be created if it doesn't exist")
parser.add_argument('-o', '--output_model_path', type=str, default='',
help='Full path to save resulting acoustic and dictionary model')
parser.add_argument('-s', '--speaker_characters', type=str, default='0',
help='Number of characters of filenames to use for determining speaker, '
'default is to use directory names')
parser.add_argument('-t', '--temp_directory', type=str, default='',
help='Temporary directory root to use for aligning, default is ~/Documents/MFA')
parser.add_argument('-f', '--fast', help="Perform a quick alignment with half the number of alignment iterations",
action='store_true')
parser.add_argument('-j', '--num_jobs', type=int, default=3,
help='Number of cores to use while aligning')
parser.add_argument('-v', '--verbose', help="Output debug messages about alignment", action='store_true')
parser.add_argument('--no_dict', help="Create a dictionary based on the orthography", action='store_true')
parser.add_argument('-c', '--clean', help="Remove files from previous runs", action='store_true')
parser.add_argument('-d', '--debug', help="Debug the aligner", action='store_true')
parser.add_argument('-i', '--ignore_exceptions', help='Ignore exceptions raised when parsing data',
action='store_true')
args = parser.parse_args()
fix_path()
try:
args.speaker_characters = int(args.speaker_characters)
except ValueError:
pass
validate_args(args)
if not args.output_model_path:
args.output_model_path = None
temp_dir = args.temp_directory
if args.no_dict:
align_corpus_no_dict(args)
else:
align_corpus(args)
unfix_path()
``` |
{
"source": "24hours/tf_fcn",
"score": 2
} |
#### File: 24hours/tf_fcn/sbdd_input.py
```python
import tensorflow as tf
import scipy
import numpy as np
import scipy.io as spio
# DATA_DIR = '/data/SBDD_Release/dataset/'
DATA_DIR = None
if DATA_DIR is None:
raise Exception('DATA_DIR is not set')
weight_decay = 5e-4
def get_input(input_file, batch_size, im_size=224):
input = DATA_DIR + input_file
filenames = []
with open(input, 'r') as f:
for line in f:
filenames.append('{}/img/{}.jpg {}'.format(
DATA_DIR, line.strip(),
line.strip()))
filename_queue = tf.train.string_input_producer(filenames)
filename, label_dir = tf.decode_csv(filename_queue.dequeue(), [[""], [""]], " ")
label = label_dir;
file_contents = tf.read_file(filename)
im = tf.image.decode_jpeg(file_contents)
im = tf.image.resize_images(im, im_size, im_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
im = tf.reshape(im, [im_size, im_size, 3])
im = tf.to_float(im)
im_mean = tf.constant([122.67892, 116.66877, 104.00699], dtype=tf.float32)
im = tf.sub(im, im_mean)
# im = tf.image.per_image_whitening(im)
# im = tf.image.per_image_whitening(im)
min_queue_examples = int(10000 * 0.4)
example_batch, lbl_batch = tf.train.batch([im, label],
num_threads=1,
batch_size=batch_size,
capacity=min_queue_examples + 3 * batch_size)
return example_batch, lbl_batch
def CACHE_LABEL(input_file, im_size=224):
input = DATA_DIR + input_file
CACHE = {}
with open(input, 'r') as f:
for line in f:
cls_file = line.strip()
# cls_path = '{}/label/{}_label.png'.format(DATA_DIR, cls_file)
# label = scipy.ndimage.imread(cls_path)
# label[label == 65535] = 1
cls_path = '{}cls/{}.mat'.format(DATA_DIR, cls_file)
mat = spio.loadmat(cls_path)
label = mat['GTcls'][0]['Segmentation'][0].astype(np.uint8);
# print label.shape
label = scipy.misc.imresize(label, [im_size, im_size], interp='nearest')
CACHE[cls_file] = np.reshape(label, [1, im_size, im_size])
return CACHE
def GET_LABEL(name_list, cache):
LB = []
for l in name_list:
LB.append(cache[l])
LBL = np.concatenate(LB, axis=0)
return LBL
``` |
{
"source": "24jkeen/Quantitative-analysis-of-approaches-to-group-marking",
"score": 3
} |
#### File: Quantitative-analysis-of-approaches-to-group-marking/FunctionFiles/Analysis.py
```python
from math import fabs as absolute
from math import sqrt
def ResultArray(PopArray, CalcArray):
AbsErrArray = []
count = 0
while count < len(PopArray):
AbsErrArray.append(absolute(CalcArray[count] - PopArray[count]))
count = count + 1
MaxErr = max(AbsErrArray)
AveErr = sum(AbsErrArray) / len(AbsErrArray)
SqrError = [x**2 for x in AbsErrArray]
RMS = sqrt(sum(SqrError) / len(SqrError))
RelErrArray = []
count = 0
while count < len(PopArray):
if PopArray[count] != 0:
RelErrArray.append((AbsErrArray[count]/PopArray[count])*100)
count = count + 1
MaxRelErr = max(RelErrArray)
AveRelErr = sum(RelErrArray) / len(RelErrArray)
ResultDict = {"MaxErr": MaxErr,"AveErr": AveErr, "MaxRelErr": MaxRelErr, "AveRelErr": AveRelErr, "RootMeanSqr": RMS, "List": AbsErrArray}
return ResultDict
```
#### File: Quantitative-analysis-of-approaches-to-group-marking/FunctionFiles/AssignGroup.py
```python
from fractions import gcd
def Function(n, GroupSize):
# check enough n
if n < GroupSize**2:
print("There needs to be more people")
return []
# Initialise the array to hold the groups in
GroupArray = []
i = 0;
while i < n:
GroupArray.append([])
i = i + 1
# Cycle through until GroupArray is full
index = 0
Diff = 1
while index < len(GroupArray):
PersonIndex = 0
i = 0
while i < n/GroupSize:
TempArray = [PersonIndex]
while len(TempArray) < GroupSize:
PersonIndex = IncreasePersonIndex(PersonIndex, Diff, n)
TempArray.append(PersonIndex)
TestUniqueness(TempArray, GroupArray)
GroupArray[index] = TempArray
index = index + 1
i = i + 1
PersonIndex = IncreasePersonIndex(PersonIndex, Diff, n)
Diff = NewDiff(Diff, GroupSize, n)
# Return matrix
return GroupArray
def TestUniqueness(Array, GroupArray):
for Group in GroupArray:
if Group == Array:
print("This does not work")
return True
else:
return False
def IncreasePersonIndex(PersonIndex, Diff, n):
if PersonIndex + Diff < n:
return PersonIndex + Diff
## elif PersonIndex + Diff == n:
## return PersonIndex + Diff + 1 - n
else:
return PersonIndex + Diff - n
def NewDiff(Diff, GroupSize, n):
if Diff == 1:
Diff = Diff + GroupSize
while gcd(Diff, n) != 1:
Diff = Diff + 1
return Diff
else:
Diff = Diff + 1
while gcd(Diff, n) != 1:
Diff = Diff + 1
return Diff
```
#### File: Quantitative-analysis-of-approaches-to-group-marking/ModelFiles/RA.py
```python
import random
MarkRatio = 0.7
SelfRatio = 1 - MarkRatio
# Function takes the ideal mark and modifies it randomly
def SelfAssess(RankedStudentMark):
ReturnMark = RankedStudentMark + random.randrange(-16,16)
if ReturnMark > 100:
return 100
elif ReturnMark < 0:
return 0
else:
return ReturnMark
# To remove randomness, comment out previous code and uncomment code below
#return RankedStudentMark
def RA(Population, GroupMarks, AssignedGroups, GroupSize):
CalcResults = []
for n in range(len(Population)):
CalcResults.append(0)
GroupSize = len(AssignedGroups[0])
for GroupIndex in range(len(AssignedGroups)):
Group = AssignedGroups[GroupIndex]
GroupMark = GroupMarks[GroupIndex]
for person in Group:
SelfAssessment = SelfAssess(Population[person])
Mark = MarkRatio * GroupMark + SelfRatio * SelfAssessment
CalcResults[person] = CalcResults[person] + Mark / GroupSize
return CalcResults
def MRA(Population, GroupMarks, AssignedGroups, GroupSize):
CalcResults = []
for n in range(len(Population)):
CalcResults.append(0)
GroupSize = len(AssignedGroups[0])
for GroupIndex in range(len(AssignedGroups)):
Group = AssignedGroups[GroupIndex]
GroupMark = GroupMarks[GroupIndex]
SelfAssessList = []
for person in Group:
SelfAssessList.append(SelfAssess(Population[person]))
total = sum(SelfAssessList)
for index in range(len(SelfAssessList)):
SelfAssessList[index] = (GroupSize * SelfAssessList[index]) / total
index = 0
for person in Group:
Mark = (MarkRatio + SelfRatio * SelfAssessList[index]) * GroupMark
CalcResults[person] = CalcResults[person] + Mark / GroupSize
index = index + 1
return CalcResults
``` |
{
"source": "24mu13/concierge",
"score": 3
} |
#### File: concierge/tests/test_download.py
```python
import os
import datetime
from PyPDF4 import PdfFileReader
from app.services.ifq import IFQ
today = datetime.date.today()
# def test_exists_for_failure(archiver):
# assert not archiver.exists('how-to-enlarge-your-penis.pdf')
def test_download(ifq):
# filename = today.strftime('ilfatto-%Y%m%d.pdf')
# tmp_file = ifq.download_pdf(today)
    tmp_file = '<KEY>'
print(tmp_file)
assert os.path.exists(tmp_file)
# check file size
stat = os.stat(tmp_file)
assert stat.st_size > 5 * 1024 * 1024, 'The PDF file size does not reach the minimum value'
# check is a real PDF file
with open(tmp_file, 'rb') as file:
doc = PdfFileReader(file)
print(dir(doc))
print(doc.documentInfo)
assert doc.numPages > 15
# assert ifq.download_pdf(today)
``` |
{
"source": "24sharkS/autowrap-1",
"score": 2
} |
#### File: autowrap-1/autowrap/Utils.py
```python
from __future__ import print_function
import sys
template = """
from distutils.core import setup, Extension
import sys
import pprint
from Cython.Distutils import build_ext
ext = Extension("%(name)s", sources = %(source_files)s, language="c++",
include_dirs = %(include_dirs)r,
extra_compile_args = [%(compile_args)s],
extra_link_args = [],
)
setup(cmdclass = {'build_ext' : build_ext},
name="%(name)s",
version="0.0.1",
ext_modules = [ext]
)
"""
def compile_and_import(name, source_files, include_dirs=None, **kws):
if include_dirs is None:
include_dirs = []
debug = kws.get("debug")
import os.path
import shutil
import tempfile
import subprocess
import sys
tempdir = tempfile.mkdtemp()
if debug:
print("\n")
print("tempdir=", tempdir)
print("\n")
for source_file in source_files:
shutil.copy(source_file, tempdir)
if sys.platform != "win32":
compile_args = "'-Wno-unused-but-set-variable'"
else:
compile_args = ""
include_dirs = [os.path.abspath(d) for d in include_dirs]
source_files = [os.path.basename(f) for f in source_files]
setup_code = template % locals()
if debug:
print("\n")
print("-" * 70)
print(setup_code)
print("-" * 70)
print("\n")
now = os.getcwd()
os.chdir(tempdir)
with open("setup.py", "w") as fp:
fp.write(setup_code)
import sys
sys.path.insert(0, tempdir)
if debug:
print("\n")
print("-" * 70)
import pprint
pprint.pprint(sys.path)
print("-" * 70)
print("\n")
assert subprocess.Popen("%s setup.py build_ext --force --inplace" % sys.executable, shell=True).wait() == 0
print("BUILT")
result = __import__(name)
print("imported")
if debug:
print("imported", result)
sys.path = sys.path[1:]
os.chdir(now)
print(result)
return result
def remove_labels(graph):
_remove_labels = lambda succ_list: [s for s, label in succ_list]
pure_graph = dict((n0, _remove_labels(ni)) for n0, ni in graph.items())
return pure_graph
def find_cycle(graph_as_dict):
""" modified version of
http://neopythonic.blogspot.de/2009/01/detecting-cycles-in-directed-graph.html
"""
nodes = list(graph_as_dict.keys())
for n in graph_as_dict.values():
nodes.extend(n)
todo = list(set(nodes))
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for node in graph_as_dict.get(top, []):
if node in stack:
return stack[stack.index(node):]
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
return None
def _check_for_cycles_in_mapping(mapping):
# detect cylces in typedefs
graph = dict()
for (alias, type_) in mapping.items():
successors = type_.all_occuring_base_types()
graph[alias] = successors
cycle = find_cycle(graph)
if cycle is not None:
info = " -> ".join(map(str, cycle))
raise Exception("mapping contains cycle: " + info)
def print_map(mapping):
for k, v in mapping.items():
print("%8s -> %s" % (k, v))
def flatten(mapping):
""" resolves nested mappings, eg:
A -> B
B -> C[X,D]
C -> Z
D -> Y
is resolved to:
A -> Z[X,Y]
B -> Z[X,Y]
C -> Z
D -> Y
"""
_check_for_cycles_in_mapping(mapping)
# this loop only terminates for cylce free mappings:
while True:
for name, type_ in mapping.items():
transformed = type_.transformed(mapping)
if transformed != type_:
mapping[name] = transformed
break
else:
break
``` |
{
"source": "24sharkS/OpenMS",
"score": 2
} |
#### File: src/pyOpenMS/create_r_extension.py
```python
from __future__ import print_function
# windows ?
import sys
iswin = sys.platform == "win32"
# make sure we only log errors and not info/debug ...
from logging import basicConfig
from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG
basicConfig(level=21)
import config
from env import (QT_QMAKE_VERSION_INFO, OPEN_MS_BUILD_TYPE, OPEN_MS_SRC,
OPEN_MS_CONTRIB_BUILD_DIRS, OPEN_MS_LIB, OPEN_SWATH_ALGO_LIB, SUPERHIRN_LIB,
OPEN_MS_BUILD_DIR, MSVS_RTLIBS, OPEN_MS_VERSION,
Boost_MAJOR_VERSION, Boost_MINOR_VERSION)
IS_DEBUG = OPEN_MS_BUILD_TYPE.upper() == "DEBUG"
if iswin and IS_DEBUG:
raise Exception("building pyopenms on windows in debug mode not tested yet.")
# use autowrap to generate Cython and .cpp file for wrapping OpenMS:
import autowrap.Main
import autowrap.CodeGenerator
import autowrap.DeclResolver
import glob
import pickle
import os.path
import os
import shutil
classdocu_base = "http://www.openms.de/current_doxygen/html/"
autowrap.CodeGenerator.special_class_doc = "Documentation is available at " + classdocu_base + "class%(namespace)s_1_1%(cpp_name)s.html\n"
autowrap.DeclResolver.default_namespace = "OpenMS"
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while len(out) < num:
out.append(seq[int(last):int(last + avg)])
last += avg
# append the rest to the last element (if there is any)
out[-1].extend( seq[int(last):] )
return out
j = os.path.join
# Threads and Modules
PY_NUM_THREADS = 1
PY_NUM_MODULES = 1
src_pyopenms = j(OPEN_MS_SRC, "src/pyOpenMS")
pxd_files = glob.glob(src_pyopenms + "/pxds/*.pxd")
addons = glob.glob(src_pyopenms + "/R-addons/*.R")
converters = [j(src_pyopenms, "R-converters")]
persisted_data_path = "include_dir.bin"
extra_cimports = []
# We need to parse them all together but keep the association about which class
# we found in which file (as they often need to be analyzed together)
decls, instance_map = autowrap.parse(pxd_files, ".", num_processes=int(PY_NUM_THREADS))
# Perform mapping
pxd_decl_mapping = {}
for de in decls:
tmp = pxd_decl_mapping.get(de.cpp_decl.pxd_path, [])
tmp.append(de)
pxd_decl_mapping[ de.cpp_decl.pxd_path] = tmp
# Split into chunks based on pxd files and store the mapping to decls, addons
# and actual pxd files in a hash. We need to produce the exact number of chunks
# as setup.py relies on it as well.
pxd_files_chunk = chunkIt(list(pxd_decl_mapping.keys()), int(PY_NUM_MODULES))
# Sanity checks: we should find all of our chunks and not have lost files
if len(pxd_files_chunk) != int(PY_NUM_MODULES):
raise Exception("Internal Error: number of chunks not equal to number of modules")
if sum([len(ch) for ch in pxd_files_chunk]) != len(pxd_decl_mapping):
raise Exception("Internal Error: chunking lost files")
mnames = ["pyopenms_%s" % (k+1) for k in range(int(PY_NUM_MODULES))]
allDecl_mapping = {}
for pxd_f, m in zip(pxd_files_chunk, mnames):
tmp_decls = []
for f in pxd_f:
tmp_decls.extend( pxd_decl_mapping[f] )
allDecl_mapping[m] = {"decls" : tmp_decls, "addons" : [] , "files" : pxd_f}
# Deal with addons, make sure the addons are added to the correct class generator.
is_added = [False for k in addons]
for modname in mnames:
for k,a in enumerate(addons):
# Match addon basename to pxd basename
for pfile in allDecl_mapping[modname]["files"]:
if os.path.basename(a).split(".")[0] == os.path.basename(pfile).split(".")[0]:
allDecl_mapping[modname]["addons"].append(a)
is_added[k] = True
if is_added[k]:
continue
# Also match by class name (sometimes one pxd contains multiple classes
# and the addon is named after one of them)
for dclass in allDecl_mapping[modname]["decls"]:
if os.path.basename(a) == dclass.name + ".R":
allDecl_mapping[modname]["addons"].append(a)
is_added[k] = True
# add any addons that did not get added anywhere else
for k, got_added in enumerate(is_added):
if not got_added:
# add to all modules
for m in mnames:
allDecl_mapping[m]["addons"].append( addons[k] )
def doRCodeGeneration(modname, allDecl_mapping, instance_map, converters):
# create the ropenms/ and R/ directory.
src_code_dir = j(OPEN_MS_SRC,"src/ropenms/R")
try:
os.makedirs(src_code_dir)
except OSError:
pass
m_filename = j(src_code_dir,"%s.R" % modname)
cimports, manual_code = autowrap.Main.collect_manual_code(allDecl_mapping[modname]["addons"])
autowrap.Main.register_converters(converters)
# Need to specify binding "R" as default is "cython"
autowrap_include_dirs = autowrap.generate_code(allDecl_mapping[modname]["decls"], instance_map,
target=m_filename, debug=False, manual_code=manual_code,
extra_cimports=cimports,
include_boost=False, include_numpy=True, allDecl=allDecl_mapping, target_binding="R")
allDecl_mapping[modname]["inc_dirs"] = autowrap_include_dirs
#
# copy the tests folder from ropenms-attachments to ropenms
#
dest = j(OPEN_MS_SRC,"src/ropenms/tests")
if os.path.isdir(dest):
shutil.rmtree(dest, ignore_errors=True)
src = j(OPEN_MS_SRC,"src/pyOpenMS/ropenms-attachments/tests")
shutil.copytree(src,dest)
return autowrap_include_dirs
for modname in mnames:
autowrap_include_dirs = doRCodeGeneration(modname, allDecl_mapping, instance_map, converters)
pickle.dump(autowrap_include_dirs, open(persisted_data_path, "wb"))
argzip = [ (modname, allDecl_mapping[modname]["inc_dirs"]) for modname in mnames]
print("created ropenms package")
``` |
{
"source": "24TangC/gpa-calculator",
"score": 3
} |
#### File: 24TangC/gpa-calculator/calculateGPA.py
```python
import json
from classes import classes
class calcGPA:
def __init__(self, name):
with open('unweightedscale.json', 'r') as f:
info = json.load(f)
self.un_cumulative = 0
self.we_cumulative = 0
self.num_creds = 0
with open('grades.json', 'r') as a:
grads = json.load(a)
with open('credits.json', 'r') as b:
creds = json.load(b)
with open('level.json', 'r') as c:
with open('weightedscale.json', 'r') as d:
w_creds = json.load(d)
lvls = json.load(c)
for key in creds:
self.num_creds += creds[key]
for key in grads:
cred = creds[key]
self.un_cumulative += info[grads[key]]*cred
self.we_cumulative += (info[grads[key]]+ w_creds[lvls[key]]) * cred
self.unweighted_gpa = self.un_cumulative/self.num_creds
self.weighted_gpa = self.we_cumulative/self.num_creds
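        # Worked example (illustrative numbers only, not taken from the bundled JSON
        # files): if 'Math' is a 5-credit AP class graded 'A', worth 4.0 on the
        # unweighted scale with the AP level adding 1.0 on the weighted scale, it
        # contributes 4.0*5 = 20.0 unweighted and (4.0+1.0)*5 = 25.0 weighted
        # credit-points before dividing by the total number of credits.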
```
#### File: 24TangC/gpa-calculator/classes.py
```python
class classes:
def __init__(self, name):
print('How many classes are you taking?')
self.num_classes = int(input())
self.grades = {}
self.credits = {}
self.level = {}
for x in range(self.num_classes):
print(f'What is your #{x+1} input class?')
temp = input()
print(f'What is the level of {temp}? AP - 1, Honors - 2, CP - 3')
self.level[temp] = int(input())
print(f'What is your letter grade in {temp}?')
self.grades[temp] = input()
print(f'How many credits is this class? (Out of 5)')
self.credits[temp] = float(input())
``` |
{
"source": "24TangC/USACO-Bronze",
"score": 4
} |
#### File: USACO-Bronze/solutions/awk digits.py
```python
base_2_N = input()
base_3_N = input()
def convert(n):
n = int(n)
base_3 = []
if n < 3:
return str(n)
while n // 3 > 0:
base_3.append(str(n%3))
n //= 3
base_3.append(str(n))
base_3 = base_3[::-1]
number = ''.join(base_3)
return number
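# Illustrative sanity check (added, not part of the original submission):
# 10 in decimal is 101 in base 3 (1*9 + 0*3 + 1*1), so convert(10) returns '101'.
assert convert(10) == '101'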
for x in range(len(base_2_N)):
temp_base_2_N = []
for z in range(len(base_2_N)): #list
temp_base_2_N.append(base_2_N[z])
if base_2_N[x] == '1':
temp_base_2_N[x] = '0'
else:
temp_base_2_N[x] = '1'
temp_base_2_N = ''.join(temp_base_2_N)
N = str(int(temp_base_2_N, 2))
N = convert(N)
counter = 0
if len(base_3_N) == len(N)+1:
N = '0' + N
elif abs(len(base_3_N)-len(N)) >= 2:
continue
elif len(base_3_N)+1 == len(N):
continue
for y in range(len(N)):
if base_3_N[y] == N[y]:
continue
else:
counter += 1
if counter == 1:
print(int(N, 3))
exit()
```
#### File: USACO-Bronze/solutions/sanjitsol4.py
```python
buck1 = [int(x) for x in input().split()]
buck2 = [int(x) for x in input().split()]
milk1 = 1000
milk2 = 1000
outs = {}
def moveFrom(fromMilk, toMilk, fromBuck, toBuck, iters):
if iters == 4:
outs[fromMilk] = True
return
for i in range(len(fromBuck)):
rem = fromBuck.pop(i)
if fromMilk >= rem:
toBuck.append(rem)
fromMilk -= rem
toMilk += rem
moveFrom(toMilk, fromMilk, toBuck, fromBuck, iters + 1)
toBuck.pop(len(toBuck) - 1)
fromMilk += rem
toMilk -= rem
fromBuck.insert(i, rem)
moveFrom(milk1, milk2, buck1, buck2, 0)
print(len(outs))
``` |
{
"source": "24technology/addon-drbdmanage",
"score": 3
} |
#### File: 24technology/addon-drbdmanage/validate_config.py
```python
import sys
from contextlib import contextmanager
config_file = sys.argv[1]
def report_validity(valid_config=True):
"""Prints the whether the config file is valid or not."""
validity = "valid" if valid_config else "not valid"
print("\nYour configuration is %s.\n" % validity)
@contextmanager
def report_on(error, message):
try:
yield
except error as e:
print(message)
report_validity(False)
sys.exit(e)
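# Example of the intended use of report_on (illustrative; it mirrors the calls
# further down): a failing cast inside the block prints the message, reports the
# configuration as invalid and exits instead of surfacing a bare traceback:
#
#     with report_on(ValueError, "DRBD_REDUNDANCY must be an integer."):
#         redundancy_level = int(config["DRBD_REDUNDANCY"])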
# Assume config is valid to start.
valid_config = True
# Convert configuration file into dict.
config = {}
with open(config_file) as f:
for line in f:
key, value = line.split("=")
config[key.strip()] = value.strip()
quotes = "'\""
# Cast config values to proper types.
with report_on(KeyError, "BRIDGE_LIST must be present in configuration"):
storage_nodes = config["BRIDGE_LIST"].strip(quotes).split()
try:
deployment_nodes = config["DRBD_DEPLOYMENT_NODES"].strip(quotes).split()
except KeyError:
deployment_nodes = False
pass
with report_on(ValueError, "DRBD_REDUNDANCY must be an integer."):
try:
redundancy_level = int(config["DRBD_REDUNDANCY"])
except KeyError:
redundancy_level = False
pass
# Check that only one deployment option is configured.
if not bool(deployment_nodes) ^ bool(redundancy_level):
valid_config = False
print("You must have one and only one of the following configured!")
print("DRBD_DEPLOYMENT_NODES")
print("DRBD_REDUNDANCY")
# Check that deployment_nodes are a subset of, or equal to, all storage nodes.
if deployment_nodes:
for node in deployment_nodes:
if node not in storage_nodes:
valid_config = False
print("%s not found in bridge list!"
" Nodes in DRBD_DEPLOYMENT_NODES"
" must be included in BRIDGE_LIST." % node)
if len(deployment_nodes) > len(storage_nodes):
valid_config = False
print("DRBD_DEPLOYMENT_NODES contains more nodes than BRIDGE_LIST.")
print("BRIDGE_LIST must contain all storage nodes.")
# Check that redundancy level is not out of bounds.
if redundancy_level:
if not 0 <= redundancy_level <= len(storage_nodes):
valid_config = False
print("DRBD_REDUNDANCY must be a positive integer that is "
"less than or equal to the number of nodes in BRIDGE_LIST")
# Checks for optional attributes.
if "DRBD_MIN_RATIO" in config:
with report_on(ValueError, "DRBD_MIN_RATIO must be a float."):
ratio = float(config["DRBD_MIN_RATIO"])
if not 0.0 <= ratio <= 1.0:
valid_config = False
print("DRBD_MIN_RATIO must be between 0.0 and 1.0.")
if "DRBD_MIN_COUNT" in config:
with report_on(ValueError, "DRBD_MIN_COUNT must be an integer."):
count = int(config["DRBD_MIN_COUNT"])
if not 0 <= count <= len(storage_nodes):
valid_config = False
print("DRBD_MIN_COUNT must be between 0 and "
"the number of storage nodes.")
if "DRBD_SUPPORT_LIVE_MIGRATION" in config:
choice = config["DRBD_SUPPORT_LIVE_MIGRATION"]
valid_options = ["yes", "no"]
if choice not in valid_options:
valid_config = False
print("DRBD_SUPPORT_LIVE_MIGRATION must be 'yes' or 'no'")
report_validity(valid_config)
``` |
{
"source": "24-timmarsseglingarna/app",
"score": 3
} |
#### File: app/tools/pod-xml-to-geojson.py
```python
import xml.etree.ElementTree as etree
import argparse
import re
import json
import io
import sys
import os.path
import datetime
if sys.version < '3':
import codecs
# points numbered 9000 and above are not real points; they are used to mark
# area borders
MAXPOINT=8999
def run():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--infile", help="input file")
parser.add_argument("-o", "--outfile", help="output file")
parser.add_argument("--id", help="id of terrain")
parser.add_argument("--javascript", action="store_true")
args = parser.parse_args()
tree = etree.parse(args.infile)
all_points, start_points, turning_points = get_points(tree)
inshore_legs, offshore_legs = get_legs(tree, all_points)
output_pod(args.outfile, args.javascript, args.id,
[('startPoints', start_points),
('turningPoints', turning_points),
('inshoreLegs', inshore_legs),
('offshoreLegs', offshore_legs)])
def output_pod(fname, javascript, id, features):
if sys.version < '3':
fd = codecs.open(fname, "w", encoding="utf-8")
else:
fd = io.open(fname, "w", encoding="utf-8")
if javascript:
fd.write(u'/* eslint-disable */\n')
fd.write(u'export var basePodSpec = ')
fd.write(u'{"id": %s, ' % id)
flen = len(features)
i = 1
for (name, obj) in features:
fd.write(u'"%s": {"type": "FeatureCollection",'
'"crs": { "type": "name",'
'"properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },'
'"features":' % name)
fd.write(json.dumps(obj, ensure_ascii=False))
if i == flen:
fd.write(u'}')
else:
i = i + 1
fd.write(u'},\n')
if javascript:
fd.write(u'};\n')
else:
fd.write(u'}\n')
def get_points(tree):
doc = tree.getroot()
startnumbers = {}
all_points = {}
start_points = []
turning_points = []
for n in doc.findall("kretsar/krets/startpoints/number"):
startnumbers[n.text] = True
for p in doc.findall("points/point"):
number = p.find("number").text
if int(number) > MAXPOINT:
continue
name = p.find("name").text
descr = p.find("descr").text
lat = p.find("lat").text
lng = p.find("long").text
footnote = None
footnoteelem = p.find("footnote")
if footnoteelem is not None:
footnote = footnoteelem.text
properties = {"number": number,
"name": name,
"descr": descr}
        if footnote is not None:
properties["footnote"] = footnote
coordinates = [float(lng), float(lat)]
geometry = {"type": "Point",
"coordinates": coordinates}
point = {"type": "Feature",
"properties": properties,
"geometry": geometry},
if number in startnumbers:
start_points.extend(point)
else:
turning_points.extend(point)
all_points[number] = coordinates
return all_points, start_points, turning_points
def get_legs(tree, all_points):
doc = tree.getroot()
coast = []
offshore = []
for p in doc.findall("legs/leg"):
src = p.find("from").text
dst = p.find("to").text
if int(src) > MAXPOINT or int(dst) > MAXPOINT:
continue
if int(src) < int(dst):
# since all legs are present twice (in both directions),
# skip one direction
continue
dist = p.find("dist").text
sea = p.find("sea").text
addtime = p.find("addtime").text
if dist is None:
print("** error: no distance: src: %s dst: %s" % (src, dst))
properties = {"src": src,
"dst": dst,
"dist": float(dist)}
if properties["dist"] == 0 and addtime == "1":
properties["addtime"] = True;
src_coords = all_points[src]
dst_coords = all_points[dst]
geometry = {"type": "LineString",
"coordinates": [src_coords, dst_coords]}
leg = {"type": "Feature",
"properties": properties,
"geometry": geometry},
if sea == "0":
coast.extend(leg)
else:
offshore.extend(leg)
return coast, offshore
if __name__ == '__main__':
run()
``` |
{
"source": "24-Tony/BUGSS_Interface",
"score": 3
} |
#### File: 24-Tony/BUGSS_Interface/asciify.py
```python
from PIL import Image
import sys
import urllib.request
#import urllib, cStringIO
import requests
#im = Image.open(requests.get(url, stream=True).raw)
ASCII_CHARS = ['.',',',':',';','+','*','?','%','S','#','@']
#ASCII_CHARS = ['..',',,','::',';;','++','**','??','%%','SS','##','@@']
ASCII_CHARS = ASCII_CHARS[::-1]
'''
method resize():
- takes as parameters the image, and the final width
- resizes the image into the final width while maintaining aspect ratio
'''
def resize(image, new_width):
(old_width, old_height) = image.size
aspect_ratio = float(old_height)/float(old_width)
new_height = int((aspect_ratio * new_width)/2)
new_dim = (new_width, new_height)
new_image = image.resize(new_dim)
return new_image
'''
method grayscalify():
- takes an image as a parameter
- returns the grayscale version of image
'''
def grayscalify(image):
return image.convert('L')
'''
method modify():
- replaces every pixel with a character whose intensity is similar
'''
def modify(image, buckets=25):
initial_pixels = list(image.getdata())
new_pixels = [ASCII_CHARS[pixel_value//buckets] for pixel_value in initial_pixels]
return ''.join(new_pixels)
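# Worked example (added note): with the default buckets=25 and the reversed
# ASCII_CHARS list above, a black pixel (value 0) maps to index 0 -> '@' while a
# white pixel (value 255) maps to index 255//25 = 10 -> '.', so darker pixels are
# drawn with denser characters.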
'''
method do():
- does all the work by calling all the above functions
'''
def do(image, new_width):
image = resize(image,new_width)
image = grayscalify(image)
pixels = modify(image)
len_pixels = len(pixels)
# Construct the image from the character list
new_image = [pixels[index:index+new_width] for index in range(0, len_pixels, new_width)]
return '\n'.join(new_image)
'''
method runner():
- takes as parameter the image path and runs the above code
- handles exceptions as well
- provides alternative output options
'''
def Asciify(path,newSize):
image = None
IMG=None
try:
image = Image.open(path)
except:
try:
            urllib.request.urlretrieve(path, 'a.' + path[-3:])
            # open the file we just downloaded (keep its original extension)
            image = Image.open('a.' + path[-3:])
except:
try:
image = Image.open(requests.get(path, stream=True).raw)
except:
print("Unable to find image in",path)
#print(e)
return
image = do(image,newSize)
return(image)
def asciify(path,newSize):
IMG=None
image = None
try:
image = Image.open(path)
except:
try:
            urllib.request.urlretrieve(path, 'a.' + path[-3:])
            # open the file we just downloaded (keep its original extension)
            image = Image.open('a.' + path[-3:])
except:
try:
image = Image.open(requests.get(path, stream=True).raw)
except:
print("Unable to find image in",path)
#print(e)
return
image = do(image,newSize)
print(image)
def Version():
return('Current-2021-07-28')
``` |
{
"source": "24-Tony/HTML_Python_Syntax_Highlighter",
"score": 2
} |
#### File: 24-Tony/HTML_Python_Syntax_Highlighter/Html_Code_Formatter.py
```python
import re
import os
##try:
## from pip import main as pipmain
##except:
## from pip._internal.main import main as pipmain
import subprocess
from subprocess import *
##import requests
import io
import zipfile
from urllib import request
import requests
def download_extract_zip(url):
"""
Download a ZIP file and extract its contents in memory
yields (filename, file-like object) pairs
"""
response = requests.get(url)
with zipfile.ZipFile(io.BytesIO(response.content)) as thezip:
for zipinfo in thezip.infolist():
with thezip.open(zipinfo) as thefile:
yield zipinfo.filename, thefile
import sys
py = os.path.join(sys.exec_prefix,'python.exe')
def downloadZip(url,savePath):
remoteZip = request.urlopen(url)
local_file = open(savePath, 'wb')
local_file.write(remoteZip.read())
local_file.close()
def install(package):
if type(package) == list:
for p in package:
print(f"Installing {p}")
#call(['py', '-m', 'pip', 'install',package])
proc = subprocess.Popen([py, '-m', 'pip', 'install',p],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
(stdout, stderr) = proc.communicate()
#print(type(stderr))
if proc.returncode != 0:
print(str(stderr))
else:
print("success")
#pipmain(['install', package])
else:
#call(['py', '-m', 'pip', 'install',package])
print(f"Installing {package}")
proc = subprocess.Popen([py, '-m', 'pip', 'install',package],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
(stdout, stderr) = proc.communicate()
#print(type(stderr))
if proc.returncode != 0:
print(str(stderr))
else:
print("success")
#pipmain(['install', package])
from tkinter import *
##import requests
##import io
##import zipfile
##def download_extract_zip(url):
## """
## Download a ZIP file and extract its contents in memory
## yields (filename, file-like object) pairs
## """
## response = requests.get(url)
## with zipfile.ZipFile(io.BytesIO(response.content)) as thezip:
## for zipinfo in thezip.infolist():
## with thezip.open(zipinfo) as thefile:
## yield zipinfo.filename, thefile
try:
from tkhtmlview import HTMLLabel,HTMLScrolledText
import pyperclip as pc
except ModuleNotFoundError:
print('Running some initialization...')
pack = ['tkhtmlview','pyperclip']#['requests','Pillow','tkhtmlview',]
install(pack)
#py -m pip install requests
#py -m pip install Pillow
#pip install tkhtmlview
#py -m pip install tkhtmlview
from tkhtmlview import HTMLLabel,HTMLScrolledText
import pyperclip as pc
##THEMEpath = os.path.join(os.path.expanduser('~'),'Downloads','tcl-awthemes.zip')
##if not os.path.exists(THEMEpath):
## print('Retreving data...')
## #download_extract_zip('https://sourceforge.net/projects/tcl-awthemes/files/latest/download')
## downloadZip('https://sourceforge.net/projects/tcl-awthemes/files/latest/download',THEMEpath)
##try:
## from tkinterhtml import HTMLLabel
##except:
## print('Running some initialization...')
## print(' installing tkhtmlview...')
## #pipmain(['install','--upgrade','--force-reinstall','pip'])
## #install('Pillow')
## #install('tk_html_widgets')
##
## path = os.path.join(os.path.expanduser('~'),'Downloads','tkhtmlview-master.zip')
## #f = open(path,'w+')
## #call(["curl", "-O", "https://github.com/pozzolana93/tkhtmlview/archive/refs/heads/master.zip"], stdout=f)
## #call(["curl -O https://github.com/pozzolana93/tkhtmlview/archive/refs/heads/master.zip > tkhtmlview-master.zip"], shell=True)
## downloadZip("https://github.com/pozzolana93/tkhtmlview/archive/refs/heads/master.zip",path)
## #f.close()
## try:
## a = open(path)
## a.close()
## print('Created file at:',path)
## except:
## print('installation failure')
## install(os.path.join(os.path.expanduser('~'),'Downloads','tkhtmlview-master.zip'))
## #print('Installed')
## #print(' installing tkinter...')
## #install('tkinter')
## #print( help('modules'))
## from tkinterhtml import HTMLLabel
##
# Set the default input termination code
DEFAULT_INPUT_TERMINATION_CODE = '--Terminate--'
version='2.0.0'
print(f'v {version}')
Bold = ';font-weight: 900'
# Define the default colors
Border = '#eeeeee'
Background = '#ffffff'
Foreground = '#000000'
Comment = '#DD0000'
String = '#00aa00'
Keywords = '#ff7700'
Builtins = '#900090'
Definitions = '#0000ff'
Numbers = '#0000ff'
Errors = '#ff0000'
Output = '#0000ff'
lThemes = {'Python Default':{
'Border':'#555555',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#DD0000',
'String':'#00aa00',
'Keywords':'#ff7700',
'Builtins':'#900090',
'Definitions':'#0000ff',
'Numbers':'#000000',
'Errors':'#ff0000'
},
'Python Bolded':{
'Border':'#555555',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#DD0000',
'String':'#00aa00',
'Keywords':'#ff7700'+Bold,
'Builtins':'#900090',
'Definitions':'#0000ff'+Bold,
'Numbers':'#0000ff'+Bold,
'Errors':'#ff0000'
},
'Royal Blue':{
'Border':'#555566',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#00aa00',
'String':'#DD0000',
'Keywords':'#0000ff'+Bold,
'Builtins':'#000099',
'Definitions':'#000099'+Bold,
'Numbers':'#000099'+Bold,
'Errors':'#ff0000'
},
'Sea':{
'Border':'#445566',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#445a71',
'String':'#229922',
'Keywords':'#0077aa'+Bold,
'Builtins':'#D86149',
'Definitions':'#04B7BD'+Bold,
'Numbers':'#0077aa'+Bold,
'Errors':'#ff0000'
},
'Red':{
'Border':'#665555',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#445a71',
'String':'#22863A',
'Keywords':'#D73A49'+Bold,
'Builtins':'#6F42C1',
'Definitions':'#AB0112'+Bold,
'Numbers':'#AB0112'+Bold,
'Errors':'#ff0000'
},
'Grey':{
'Border':'#555555',
'Background':'#eeeeee',
'Foreground':'#7B776F',
'Comment':'#445a71',
'String':'#C05726',
'Keywords':'#3080B5'+Bold,
'Builtins':'#C05726',
'Definitions':'#ff7700'+Bold,
'Numbers':'#000077'+Bold,
'Errors':'#ff0000'
},
'Glisten':{
'Border':'#808080',
'Background':'#ffffff',
'Foreground':'#333333',
'Comment':'#445a71',
'String':'#DD4422',
'Keywords':'#008800'+Bold,
'Builtins':'#008800',
'Definitions':'#0066BB'+Bold,
'Numbers':'#0000dd'+Bold,
'Errors':'#ff0000'
}
}
dThemes = {'Default Dark':{
'Border':'#555555',
'Background':'#222222',
'Foreground':'#dddddd',
'Comment':'#ff3333',
'String':'#00ff00',
'Keywords':'#ffaa22'+Bold,
'Builtins':'#ff40c0',
'Definitions':'#00ccff'+Bold,
'Numbers':'#55aaff'+Bold,
'Errors':'#ff0000'
},
'Rainglow':{
'Border':'#5298c4',
'Background':'#040507',
'Foreground':'#ffffff',
'Comment':'#6f809f',
'String':'#64aeb3',
'Keywords':'#508aaa'+Bold,
'Builtins':'#6ab0a3',
'Definitions':'#00838C'+Bold,
'Numbers':'#529CA8'+Bold,
'Errors':'#ff0000'
},
'Bold':{
'Border':'#3d8e91',
'Background':'#0f0d0d',
'Foreground':'#ffffff',
'Comment':'#6f809f',
'String':'#F7A21B',
'Keywords':'#F0624B'+Bold,
'Builtins':'#3D8E91',
'Definitions':'#B4B7AD'+Bold,
'Numbers':'#F7A21B'+Bold,
'Errors':'#ff0000'
},
'Dark+':{
'Border':'#555555',
'Background':'#1e1e1e',
'Foreground':'#D4D4D4',
'Comment':'#6A9955',
'String':'#CE9178',
'Keywords':'#569CD6'+Bold,
'Builtins':'#dcdcaa',
'Definitions':'#9CDCFE'+Bold,
'Numbers':'#B5CEA8'+Bold,
'Errors':'#ff0000'
},
'Citylights':{
'Border':'#171d23',
'Background':'#1d252c',
'Foreground':'#718CA1',
'Comment':'#72899e',
'String':'#68A1F0',
'Keywords':'#508aaa'+Bold,
'Builtins':'#70E1E8',
'Definitions':'#24A5AF'+Bold,
'Numbers':'#E27E8D'+Bold,
'Errors':'#ff0000'
},
'Panda':{
'Border':'#676B79',
'Background':'#292a2b',
'Foreground':'#E6E6E6',
'Comment':'#737787',
'String':'#19F9D8',
'Keywords':'#FFB86C'+Bold,
'Builtins':'#FF75B5',
'Definitions':'#6FC1FF'+Bold,
'Numbers':'#FFB86C'+Bold,
'Errors':'#ff0000'
},
'Rose':{
'Border':'#f52277',
'Background':'#141322',
'Foreground':'#B4DAE9',
'Comment':'#45898C',
'String':'#C01B5D',
'Keywords':'#FB4293'+Bold,
'Builtins':'#F37AB0',
'Definitions':'#8CE1E7'+Bold,
'Numbers':'#E5FCA6'+Bold,
'Errors':'#ff0000'
},
'Sea Green':{
'Border':'#242b38',
'Background':'#0a1018',
'Foreground':'#EEFFFF',
'Comment':'#708394',
'String':'#28735E',
'Keywords':'#25A2A6'+Bold,
'Builtins':'#5CB4DE',
'Definitions':'#4785bd'+Bold,
'Numbers':'#28735E'+Bold,
'Errors':'#ff0000'
},
'Firefly':{
'Border':'#333333',
'Background':'#0a0a0a',
'Foreground':'#a8aebd',
'Comment':'#626a73',
'String':'#a4bd00',
'Keywords':'#ff0066'+Bold,
'Builtins':'#ff8533',
'Definitions':'#827db5'+Bold,
'Numbers':'#E6E600'+Bold,
'Errors':'#ff0000'
},
'Monikai':{
'Border':'#333333',
'Background':'#121212',
'Foreground':'#bbbbbb',
'Comment':'#5C6370',
'String':'#E5C07B',
'Keywords':'#56B6C2'+Bold,
'Builtins':'#E06C75',
'Definitions':'#98C379'+Bold,
'Numbers':'#C678DD'+Bold,
'Errors':'#ff0000'
},
'Black Ocean':{
'Border':'#0c5271',
'Background':'#101316',
'Foreground':'#DFDFDF',
'Comment':'#60778c',
'String':'#7ebea0',
'Keywords':'#007aae'+Bold,
'Builtins':'#019d76',
'Definitions':'#15b8ae'+Bold,
'Numbers':'#15b8ae'+Bold,
'Errors':'#ff0000'
},
'CodePen':{
'Border':'#9CA0B1',
'Background':'#1e1f27',
'Foreground':'#D5D7DE',
'Comment':'#88AFBF',
'String':'#2BC7B9',
'Keywords':'#47CF73'+Bold,
'Builtins':'#5E91F2',
'Definitions':'#9CA0B1'+Bold,
'Numbers':'#2BC7B9'+Bold,
'Errors':'#ff0000'
},
'Acme':{
'Border':'#a2a797',
'Background':'#0f0e0d',
'Foreground':'#EDEBE6',
'Comment':'#988e86',
'String':'#E0A84C',
'Keywords':'#CF433E'+Bold,
'Builtins':'#CD122C',
'Definitions':'#d96972'+Bold,
'Numbers':'#a5e3d0'+Bold,
'Errors':'#ff0000'
},
'Arc':{
'Border':'#3c3c3c',
'Background':'#111111',
'Foreground':'#EDEBE6',
'Comment':'#888888',
'String':'#ff1ba9',
'Keywords':'#f28888'+Bold,
'Builtins':'#a80000',
'Definitions':'#A5E3D0'+Bold,
'Numbers':'#a5e3d0'+Bold,
'Errors':'#ff0000'
}
}
# Define the syntax separators
separators = [',','(','[',']',')','{','}','/','*','+','-','.','|',':',';','⋞','⋟','=']
# Create a splitting function that takes a list of separators
# Code courtesy of DelftStack: https://www.delftstack.com/howto/python/how-to-split-string-with-multiple-delimiters-in-python/#make-it-a-function - I edited it some though
def custom_split(sepr_list, str_to_split):
# Duplicate and sort the separator list
sepr_list2 = list(sepr_list)
sepr_list2.sort(key=len,reverse=True)
# create regular expression dynamically
regular_exp = '|'.join(map(re.escape, sepr_list2))
# Return the list of splits
return re.split(regular_exp, str_to_split)
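# Illustration (added): splitting on syntax separators keeps only the word-like
# tokens, e.g. custom_split([',', '(', ')'], 'print(a,b)') gives
# ['print', 'a', 'b', ''] and the empty strings are removed later with filter(None, ...).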
def Input(prompt=None,ml = False,Terminate=DEFAULT_INPUT_TERMINATION_CODE):
contents = input(prompt)
while ml:
try:
line = input()
if line == Terminate:
break
contents+='\n'+line
except EOFError:
break
#contents+='\n'+line
return contents
# create a list of instances that would set off a string with single quotes
str1 = ['\'', 'f\'','u\'','b\'','r\'','\'\'\'','f\'\'\'','u\'\'\'','b\'\'\'','r\'\'\'']
# create a list of instances that would set off a string with double quotes
str2 = ['"', 'r"','f"','u"','b"','"""','f"""','u"""','b"""','r"""']
#Create a dictionary with keywords and the like
Colors = {
#Comment : ['#','##'],
#String : ['\'', '"','f\'','u\'','b\'','r\'','r"','f"','u"','b"'],
'Keywords' : ['and','as','assert','break','class','continue','def','del','elif','else','except','False','finally','for','from','global','if','import','in','is','lambda','None','nonlocal',
'not','or','pass','raise','return','True','try','while','with','yield'],
'Builtins' : ['ValueError', 'ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 'BlockingIOError', 'BrokenPipeError', 'BufferError', 'BytesWarning', 'ChildProcessError',
'ConnectionAbortedError', 'ConnectionError', 'ConnectionRefusedError', 'ConnectionResetError', 'DeprecationWarning', 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception',
'FileExistsError', 'FileNotFoundError', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError',
'InterruptedError', 'IsADirectoryError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'ModuleNotFoundError', 'NameError', 'NotADirectoryError', 'NotImplemented',
'NotImplementedError', 'OSError', 'OverflowError', 'PendingDeprecationWarning', 'PermissionError', 'ProcessLookupError', 'ReferenceError', 'RecursionError', 'ResourceWarning',
'RuntimeError', 'RuntimeWarning', 'StopAsyncIteration', 'StopIteration', 'SystemError', 'SyntaxError', 'SyntaxWarning', 'SystemExit', 'TabError', 'TimeoutError', 'TypeError',
'UnicodeDecodeError', 'UnboundLocalError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning', 'WindowsError',
'ZeroDivisionError', 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'breakpoint', 'bytearray', 'callable', 'chr', 'classmethod', 'compile', 'complex', 'copyright', 'credits', 'delattr',
'dict', 'dir', 'divmod', 'enumerate', 'eval', 'exec', 'exit', 'filter', 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', 'input', 'int',
'isinstance', 'issubclass', 'iter', 'len', 'license', 'list', 'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', 'property','quit', 'range',
'repr', 'reversed', 'round', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', 'vars', 'zip'],
'Numbers' : [],
'Error' : [],
'Output' : []
}
# Create a dictionary with syntax ends - blank for continuous highlighting
ends = {
'Comment': '',
'String': '',
'Keywords': '</span>',
'Builtins': '</span>',
'Numbers': '</span>'
}
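# Clarifying note (added): keywords, builtins and numbers close their colour <span>
# immediately, whereas comments and strings map to an empty end marker so the
# highlight stays open until the end of the line or the closing quote is reached.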
# Create a dictionary with string to be replaced
Replacers = {'<':'⋞',
'>':'⋟',
'<p>':'',
'</p>':' <br> ',
'\n':' <br> '}
ColorToType = {
"Border" : Border,
"Background" : Background,
"Foreground" : Foreground,
"Comment" : Comment,
"String" : String,
"Keywords" : Keywords,
"Builtins" : Builtins,
"Definitions" : Definitions,
"Numbers" : Numbers,
"Error" : Errors,
"Output" : Output
}
# Create A Theme setter
def setColorScheme(Color):
try:
Color = int(Color)
except:
pass
try:
if type(Color) == type(2):
theme = Themes[list(Themes)[Color-1]]
else:
theme = Themes[Color]
global Border, Background, Foreground, Comment, String, Keywords, Builtins, Definitions, Colors, Numbers, Errors, Output
Border = theme['Border']
Background = theme['Background']
Foreground = theme['Foreground']
Comment = theme['Comment']
String = theme['String']
Keywords = theme['Keywords']
Builtins = theme['Builtins']
Definitions = theme['Definitions']
Numbers = theme['Numbers']
Errors = theme['Errors']
        Output = theme['Definitions']
except:
print('Invalid Theme')
#OLD
def Preview(code):
import datetime
import webbrowser
Path = os.path.join(os.path.expanduser('~'),'Downloads','Html - CodeSnippets')
if not os.path.exists(Path):
os.makedirs(Path)
Name = ('Code Snippet.html')# - '+str(datetime.datetime.now())[:-10].replace(':','.')+'.html')
fn = os.path.join(Path,Name)
f = open(fn, 'w+', errors='replace')
f.write(code)
f.close()
webbrowser.open_new(fn)
#NEW
def Compute(Code,color,inline):
print(color)
if color=='':
color = list(Themes)[0]
color = list(Themes).index(color)+1
print(color)
inline = bool(inline)
Raw = htmlCode(Code,color=color,inline=inline)
global root,clicked,drop,text_box,OutText_box,OUTPUT
OutText_box.config(state='normal')
OutText_box.delete(1.0, 'end')
OutText_box.insert(1.0, Raw.code)
OutText_box.config(state='disabled')
if inline:
a = Raw.code
bg = Raw.code[106:113]
else:
a = Raw.code[8:]
bg = Raw.code[184:191]
OUTPUT['bg'] = bg
OUTPUT.set_html(a)
print(bg)
import random
from tkinter import scrolledtext,ttk
from functools import partial
import win32gui
#from io import BytesIO
import base64, PIL, urllib
from PIL import ImageTk
import urllib.request
class Interface():
def __init__(self):
global root,clicked,drop,text_box,OutText_box,OUTPUT,ISINLINE
root = Tk()
win32gui.SetForegroundWindow(root.winfo_id())
root.title(f'HTML Syntax Highlighter v{version}')
## root.tk.call('lappend', 'auto_path', os.path.join(THEMEpath,'awthemes-10.4.0','pkgIndex.tcl'))
## root.tk.call('package', 'require', 'awdark')
## root.style(style='Fun.root')
fg='#ff3333'#'#3D8E91'
bg='#0f0d0d'
bg2='#1f1d1d'
root.config(bg=bg)
#try:
url = 'https://user-images.githubusercontent.com/47753578/135677892-c14221dc-e055-4e64-8150-9629b5ef5286.png'
raw_data = urllib.request.urlopen(url).read()
import warnings
warnings.filterwarnings("ignore")
        b64_data = base64.encodebytes(raw_data)
warnings.filterwarnings("always")
image = PhotoImage(data=b64_data)
root.tk.call('wm', 'iconphoto', root._w, image)
#except:
# pass
root.resizable(width=0, height=1)
root.geometry("1080x800")
Title = Label(root,text=f'HTML Syntax Highlighter v{version}',font=("Consolas", 25),pady=20,bg=bg,fg=fg).place(relx = 0.5,
rely =0,
anchor ='n')
options = list(Themes)
clicked = StringVar()
label = Label( root , text = "Pick a Theme (Themes are light until 'Default Dark')" ,bg=bg,fg=fg).place(relx = 0.5,
rely =0.075,
anchor ='n')
        errout = Label( root , text = 'Use: `err` to start and end an error and `out` to start and end an output \n to get the literal text `err` or `out`, put a "\\" before it.' ,bg=bg,fg=fg).place(relx = 1,
rely =0.075,
anchor ='ne')
COLOR = StringVar()
combostyle = ttk.Style()
combostyle.theme_create('combostyle', parent='alt',
settings = {'TCombobox':
{'configure':
{'selectbackground': bg,
'fieldbackground': bg,
'background': bg2,
}}}
)
combostyle.theme_use('combostyle')
        drop = ttk.Combobox( root ,textvariable=COLOR )
drop['values'] = options
drop['state'] = 'readonly'
#drop.config(bg = bg,fg=fg)
#drop['menu'].config(bg=bg2,fg=fg)
drop.place(relx = 0.5,
rely =0.105,
anchor ='n')
#ISINLINE = BoolVar()
ISINLINE = IntVar()
Checkbutton(root, text='inline',bg=bg,fg=fg, variable=ISINLINE).place(relx = 0.6,
rely =0.105,
anchor ='n')
message ='''Please put your code here...'''
text_box = scrolledtext.ScrolledText(
root,
height=10,
width=130,bg=bg2,fg=fg
)
text_box.place(relx = 0.5,
rely =0.15,
anchor ='n')#(expand=True)
text_box.insert('end', message)
text_box.config(state='normal')
button = Button( root , text = "Highlight",command = lambda:Compute(text_box.get("1.0",'end-1c'),
COLOR.get(),
ISINLINE.get()),bg=bg,fg=fg).place(relx = 0.5,
rely =0.36,
anchor ='n')
button = Button( root , text = "Copy",command = lambda:pc.copy(OutText_box.get("1.0",'end-1c')),bg=bg,fg=fg).place(relx = 0.009,rely = 0.61,
anchor ='nw')
OutText_box = scrolledtext.ScrolledText(
root,
height=10,
width=130,bg=bg2,fg=fg
)
OutText_box.place(relx = 0.5,
rely =0.4,
anchor ='n')#(expand=True)
OutText_box.insert('end', 'HTML Output...')
OutText_box.config(state='disabled')
label2 = Label( root , text = "Output HTML code (What you want to copy) ↑ | ↓ Preview (Indentation, Bolding, and other attributes will exist in actual implementation, but may not display here)" ,bg=bg,fg=fg).place(relx = 0.5,
rely =0.615,
anchor ='n')
OUTPUT = HTMLScrolledText(root,height=15,width=130,html='')
OUTPUT.place(relx = 0.5,
rely =0.65,
anchor ='n')
## label.pack()
#button = Button( root , text = "click Me" , command = print(clicked.get())).pack()
## root.attributes("-topmost", True)
## root.attributes("-topmost", False)
root.mainloop()
def fillTemplate(HighlightedCode,inline):
global Border, Background, Foreground, Comment, String, Keywords, Builtins, Definitions, Colors, Numbers
if inline == True:
return f'<tt style="font-size:80%; border-radius: 5px; border: .1em solid {Border}; padding: 3px; background-color:{Background}; color:{Foreground};">{HighlightedCode}</tt> '
else:
return f'<p> </p><p style="font-family: Consolas; font-size:80%;line-height:1.25em; border: solid {Border}; border-radius: 5px; border-width:.1em .1em .1em .8em; padding: 10px; background-color:{Background}; color:{Foreground};">{HighlightedCode}</p><p> </p>'
"""
/‾/ /‾/‾‾‾‾‾‾‾‾/‾| /‾| /‾/
/ /___/ /‾‾‾/ /‾‾/ |/ | / /
/ ___ / / / / | | / |/ /
/ / / / / / / /|__/| | /____
/_/ /_/ /_/ /_/ |_|_____/
|‾\|‾|/‾‾\ /‾‾‾‾/‾‾\|‾| /‾‾\|‾‾‾\
| \ | /\ | | /‾‾| /\ | | | /\ | <> |
| \ | \/ | | \__| \/ | |_| \/ | <
|_|\_|\__/ \____\__/|____\__/|_|\_\
"""
# Head the function html_NoColor
def html_NoColor(code,inline = False):
""".|‾\|‾|/‾‾\ /‾‾‾‾/‾‾\|‾| /‾‾\|‾‾‾\ .
.| \ | /\ | | /‾‾| /\ | | | /\ | <> |.
.| \ | \/ | | \__| \/ | |_| \/ | < .
.|_|\_|\__/ \____\__/|____\__/|_|\_\.
"""
# Create a copy of code
rawcode = code
if rawcode[-6:]!=' <br> ':
rawcode+=' <br> '
# Loop through the replacements
for item in Replacers:
# and replace
rawcode = rawcode.replace(item,Replacers[item])
###### Finish Format ######
# Replace the quotation and less/greater than string subsitution and tabs
rawcode = rawcode.replace('“','"').replace('”','"').replace('⋞','<').replace('⋟','>').replace('\t',' '*3).replace(' ',' '*4)
# remove the last six characters
rawcode=rawcode[:-14]
# Complete the formatting
code = fillTemplate(rawcode,inline)
# Print the code
print(code)
'''
/‾/ /‾/‾‾‾‾‾‾‾‾/‾| /‾| /‾/
/ /___/ /‾‾‾/ /‾‾/ |/ | / /
/ ___ / / / / | | / |/ /
/ / / / / / / /|__/| | /____
/_/ /_/ /_/ /_/_____|_|_____/
/‾‾‾‾‾‾/‾‾‾‾‾\|‾‾‾‾‾\ | _____|
| /‾‾‾| /‾\ | |‾\ \| |___
| | | | | | | | | ___|
| \___| \_/ | |_/ /| |_____
\______\_____/|_____/ |_______|
'''
# Head the function htmlCode
def html_code(code,inline = False, Return = False, color = False):
if color != False:
setColorScheme(color)
global ColorToType
ColorToType = {
"Border" : Border,
"Background" : Background,
"Foreground" : Foreground,
"Comment" : Comment,
"String" : String,
"Keywords" : Keywords,
"Builtins" : Builtins,
"Definitions" : Definitions,
"Numbers" : Numbers,
"Error" : Errors,
"Output" : Output
}
# Create a copy of code
rawcode = code
# Loop through the replacements
for item in Replacers:
# and replace
rawcode = rawcode.replace(item,Replacers[item])
# Create an empty highlighted code string
HighlightedCode = ''
# If the code does not end in a new line, make it
if rawcode[-6:]!=' <br> ':
rawcode+=' <br> '
# Split the code with spaces to get a rough syntax separation
words = rawcode.split(' ')
# Set continuous highlighting booleans
InComment = False
InStringType1 = False
InStringType1_3 = False
InStringType2 = False
InStringType2_3 = False
InDef_Class = False
InErr = False
InOut = False
#access the global separators variable
global separators
# loop through the rough words
for WORD in words:
# Create an empty word
MYWORD = ''
# Split the word more completely
code_words = custom_split(separators,WORD)
# Filter out empty string
code_words = list(filter(None, code_words))
# Create a reinfusement map
heal_code = custom_split(list(filter(None, code_words)),WORD)
# Create a list for the code
Broken_Code = []
# Loop through the refined syntax
for word in code_words:
# create an appendWord variable with a default of word
appendWord = word
# If we are in a comment
if InComment:
# If we are at the end of the line
if word == '<br>':
# End the comment
appendWord = '</span>'+word
# Terminate the comment section
InComment = False
# Otherwise, if we are in a single quote string
elif InStringType1:
# If there is a single quote within our refined syntax
#print('-',word,'if', word.find('\'') != -1, "and(", word.find('\\\'')!=(word.find('\'')-1), "or", word.find('\\\'') ==-1,')')
#print()
if word.find('\'') != -1 and (word.find('\\\'')!=(word.find('\'')-1) or word.find('\\\'') ==-1):
# End the string
appendWord = word.replace('\'','\'</span>')
# Terminate the single quote string section
InStringType1 = False
# Otherwise, if we are in a triple single quote string
elif InStringType1_3:
# If there is a triple single quote within our refined syntax
if word.find('\'\'\'') != -1 and (word.find('\\\'\'\'')!=(word.find('\'\'\'')-1) or word.find('\\\'\'\'') ==-1):
# End the string
appendWord = word.replace('\'\'\'','\'\'\'</span>')
# Terminate the triple single quote string section
InStringType1_3 = False
# Otherwise, if we are in a double quote string
elif InStringType2:
# If there is a double quote within our refined syntax
if word.find('"') != -1 and (word.find('\\"')!=(word.find('"')-1) or word.find('\\"') ==-1):
# End the string
appendWord = word.replace('"','"</span>')
# Terminate the double quote string section
InStringType2 = False
# Otherwise, if we are in a triple double quote string
elif InStringType2_3:
# If there is a triple double quote within our refined syntax
if word.find('"""') != -1 and ( word.find('\\"""')!=(word.find('"""')-1) or word.find('\\"""') ==-1):
# End the string
appendWord = word.replace('"""','"""</span>')
# Terminate the triple double quote string section
InStringType2_3 = False
# Otherwise, if we are in the heading of a function or a class
elif InDef_Class:
# Make the word blue
appendWord = f'<span style=“color:{Definitions}”>'+word+'</span>'
# Terminate the function or a class heading section
InDef_Class = False
# Otherwise, if we are in an error
elif InErr:
# If an error is ending and
if word.find('`err`') != -1 and ( word.find('\\`err`')!=(word.find('`err`')-1) or word.find('\\`err`') ==-1):
# End the string
appendWord = word.replace('`err`','</span>')
# Terminate the triple double quote string section
InErr = False
appendWord = appendWord.replace('\`err`','`err`')
# Otherwise, if we are in an output
elif InOut:
# If an error is ending and
if word.find('`out`') != -1 and ( word.find('\\`out`')!=(word.find('`out`')-1) or word.find('\\`out`') ==-1):
# End the string
appendWord = word.replace('`out`','</span>')
# Terminate the triple double quote string section
InOut = False
appendWord = appendWord.replace('\`out`','`out`')
#Otherwise
else:
# If the word is not blank
if word != '':
# loop through the colors
for color in Colors:
# if the word is in the color
if word in Colors[color]:
# Make the word the color it should be
clr = ColorToType[color]
appendWord = (f'<span style=“color:{clr}”>'+word+ends[color])
elif color == "Numbers":
if word.isnumeric():
clr = ColorToType[color]
appendWord = (f'<span style=“color:{clr}”>'+word+ends[color])
elif color == 'Error':
if word.find('`err`') != -1 and ( word.find('\\`err`')!=(word.find('`err`')-1) or word.find('\\`err`') ==-1):
clr = ColorToType[color]
appendWord = word.replace('`err`',f'<span style=“color:{clr}”>',1)
InErr = True
if len(word)>len(' `err`') and (word[-5:]=='`err`'):
appendWord = appendWord.replace('`err`','</span>')
InErr = False
appendWord = appendWord.replace('\`err`','`err`')
elif color == 'Output':
if word.find('`out`') != -1 and ( word.find('\\`out`')!=(word.find('`out`')-1) or word.find('\\`out`') ==-1):
clr = ColorToType[color]
appendWord = word.replace('`out`',f'<span style=“color:{clr}”>',1)
InOut = True
if len(word)>len(' `out`') and (word[-5:]=='`out`'):
appendWord = appendWord.replace('`out`','</span>')
InOut = False
appendWord = appendWord.replace('\`out`','`out`')
# Set the first letter of the word
primary = word[0]
# get the first two and three letters of the word if it is multiple letters long
try:
secondary = word[:2]
secEnd = word[-2:]
except:
secondary = primary
secEnd = word[-1:]
try: tertiary = word[:3]
except: tertiary = secondary
try: qry = word[:4]
except: qry = tertiary
# If the first three letters are triple single quote string beginings
if tertiary == '\'\'\'' or (len(qry) == 4 and qry in str1):
appendWord = (f'<span style=“color:{String}”>'+word)
InStringType1_3 = True
                    # Otherwise, if the first one or two letters are single quote string beginnings
elif primary == '\'' or secondary in str1:
                        # if the word sets off a string, or ends with a string beginning, or the ending is not a string ending
if len(word) == 1 or secondary in str1 or (word[-1] !='\'' or secEnd =='\\\''):
# Start the string section
InStringType1 = True
# Color the word an indefinite green
appendWord = (f'<span style=“color:{String}”>'+word)
# Otherwise
else:
#Start and end the string on the word
appendWord = (f'<span style=“color:{String}”>'+word+'</span>')
# Otherwise, if the first three letters are triple double quote string beginings
elif tertiary == '"""' or (len(qry) == 4 and qry in str2):
appendWord = (f'<span style=“color:{String}”>'+word)
InStringType2_3 = True
                    # Otherwise, if the first one or two letters are double quote string beginnings
elif primary == '"' or secondary in str2:
                        # if the word sets off a string, or ends with a string beginning, or the ending is not a string ending
if len(word) == 1 or secondary in str2 or word[-1] !='"' or secEnd =='\\"' :
# Start the string section
InStringType2 = True
# Color the word an indefinite green
appendWord = (f'<span style=“color:{String}”>'+word)
# Otherwise
else:
#Start and end the string on the word
appendWord = (f'<span style=“color:{String}”>'+word+'</span>')
                    # Otherwise, if the word sets off a comment
elif primary == '#':
# Color the word an indefinite red
appendWord = (f'<span style=“color:{Comment}”>'+word)
# Start off the comment section
InComment = True
# Otherwise, if the word was def or class
elif word == 'def' or word == 'class' :
# Start the def/class head section
InDef_Class = True
# Append the higlighted syntax
Broken_Code.append(appendWord)
############ reinfuse the subtracted string ##############
# prepare the heal_code list and the broken_code list
h1 = heal_code[0]
heal_code = list(filter(None, heal_code))
heal_code.append('')
while len(Broken_Code)< len(heal_code):
Broken_Code.append('')
# Loop through the items in the heal_code list
for i in range(len(heal_code)):
try:
# Reinfuse the subtracted characters
if h1 == '':
MYWORD +=Broken_Code[i]+heal_code[i]
else:
MYWORD +=heal_code[i]+Broken_Code[i]
# if it rasies an error
except:
# Help the debugging
print('An ERROR is being raised!')
print(WORD)
print(heal_code, f'len: {len(heal_code)}')
print(Broken_Code, f'len: {len(Broken_Code)}')
# raise the error
MYWORD +=Broken_Code[i]+heal_code[i]
# Append the word to the highlighted code
HighlightedCode += MYWORD+' '
###### Finish Format ######
# Replace the quotation and less/greater than string subsitution and tabs
HighlightedCode = HighlightedCode.replace('“','"').replace('”','"').replace('⋞','<').replace('⋟','>').replace('\t',' '*3).replace(' ',' '*2).replace(' ',' '*2)
# remove the last six characters
HighlightedCode=HighlightedCode[:-16]#6
# Complete the formatting
code = fillTemplate(HighlightedCode,inline)
# Print the code
print(code)
if Return == True:
return code
#htmlCode(input('Code Here:'))
class htmlCode():
""" ../‾‾‾‾‾‾/‾‾‾‾‾\|‾‾‾‾‾\ | _____|.
.| /‾‾‾| /‾\ | |‾\ \| |___....
.| | | | | | | | | ___|...
.| \___| \_/ | |_/ /| |_____..
..\______\_____/|_____/ |_______|.
"""
def __init__(self, code, inline=False, color = False):
print('⩋'*40+'\n')
if type(code)==tuple or type(code)==list:
self.code = ''
for section in code:
if type(color)==tuple or type(color)==list:
clr = color[code.index(section)]
else:
clr = color
self.code += html_code(section,inline = inline, Return = True,color=clr)[:-(6+7*(inline==False))]
else:
self.code = html_code(code,inline = inline, Return = True,color=color)
print('\n'+'⩊'*40)
def preview(self):
Preview(self.code)
def HighlightCode():
try:
inline = False
code = Input(f'Code (end code with "{DEFAULT_INPUT_TERMINATION_CODE}" on an empty line after the code):',ml=True)
        if code.find('\n') ==-1: inline = input('Inline? (True/False)').strip().lower() in ('true', 'yes', 'y', '1')
        preview = input('Preview? (True/False)').strip().lower() in ('true', 'yes', 'y', '1')
except ValueError:
print('Please make sure your input for the "Inline?" and "Preview?" prompts were booleans')
return
print()
code = html_code(code,inline = inline, Return = True)
if preview:Preview(code)
def ThemeDisplay():
code = []
colors = []
for Theme in Themes:
colors.append(Theme)
code.append( f'''# {Theme} Theme Test
def Function():
"""This Function is a test"""
return str(1101.2021)''')
htmlCode(code,color = colors).preview()
#Determine the theme
Themes = {}
ltNames = list(lThemes)
ltVals = list(lThemes.values())
dtNames = list(dThemes)
dtVals = list(dThemes.values())
for theme in range(len(lThemes)+len(dThemes)):
if theme < len(lThemes):
Themes[ltNames[theme]] = ltVals[theme]
else:
Themes[dtNames[theme-(len(lThemes))]] = dtVals[theme-(len(lThemes))]
def ThemePrompt():
global Themes
Themes = {}
print('Light themes')
ltNames = list(lThemes)
ltVals = list(lThemes.values())
dtNames = list(dThemes)
dtVals = list(dThemes.values())
for theme in range(len(lThemes)+len(dThemes)):
if theme == len(lThemes):
print('Dark themes:')
if theme < len(lThemes):
print(' ', str(theme+1)+'.',ltNames[theme])
Themes[ltNames[theme]] = ltVals[theme]
else:
print(' ', str(theme+1)+'.',dtNames[theme-(len(lThemes))])
Themes[dtNames[theme-(len(lThemes))]] = dtVals[theme-(len(lThemes))]
Color = input('\n- a display of all of the themes can be shown by the command: ThemeDisplay()\nWhich theme should we use?')
if Color.find('ThemeDisplay') != -1:
ThemeDisplay()
ThemePrompt()
else:
setColorScheme(Color)
if input('Use Interface? (y/n):').upper() == 'N':
ThemePrompt()
# Print a warning statement
print(f'''
-----------------------------------------------------------------------------------------------
===============================================================================================
###############################################################################################
Warning! Formatting issues may arise from using a python escape character like: "\\n", (a list
of these can be found at: https://www.w3schools.com/python/gloss_python_escape_characters.asp),
to fix this please replace the backslash part with two backslashes, for instance: "\\\\n".
Another method of solving this is to use the HighlightCode() function which takes a multiline
input of your code, terminated by the termination code: {DEFAULT_INPUT_TERMINATION_CODE}
Lastly, this program may not work for syntax highlighting every program. By using this,
you take on the responsibility to ensure that the highlighting is accurate.
###############################################################################################
===============================================================================================
-----------------------------------------------------------------------------------------------''')
# Print a guide
print('\n\nInitiate using the "htmlCode" function\n>>> htmlCode(\'\'\'CODE GOES HERE\'\'\') or >>> htmlCode(\'\'\'CODE GOES HERE\'\'\', inline = True)\nPlease use triple quotes for multiline code\n'+'-'*30+'\nThe htmlCode(\'\'\'CODE GOES HERE\'\'\') function returns an object that can be previewed using the OBJECT.preview() command.\n An immediate preview can be achieved by using the command htmlCode(\'\'\'CODE GOES HERE\'\'\').preview()\n')
    print('Use: `err` to start and end an error and `out` to start and end an output - to get the literal text `err` or `out`, put a "\\" before it.')
else:
try:
Interface()
except:
Interface()
``` |
{
"source": "24werewolf/python_video_stab",
"score": 3
} |
#### File: python_video_stab/vidstab/download_videos.py
```python
from urllib.request import urlretrieve
REMOTE_OSTRICH_VID_PATH = 'https://s3.amazonaws.com/python-vidstab/ostrich.mp4'
REMOTE_TRUNCATED_OSTRICH_VID_PATH = 'https://s3.amazonaws.com/python-vidstab/trunc_video.avi'
REMOTE_SKATELINE_VID_PATH = 'https://s3.amazonaws.com/python-vidstab/thrasher.mp4'
def download_ostrich_video(download_to_path):
"""Download example shaky clip of ostrich used in README (mp4)
    Video used with permission from the HappyLiving YouTube channel.
Original video: https://www.youtube.com/watch?v=9pypPqbV_GM
:param download_to_path: path to save video to
:return: None
>>> from vidstab import VidStab, download_ostrich_video
>>> path = 'ostrich.mp4'
>>> download_ostrich_video(path)
>>>
>>> stabilizer = VidStab()
>>> stabilizer.stabilize(path, OUTPUT_VIDEO_PATH)
"""
urlretrieve(REMOTE_OSTRICH_VID_PATH, download_to_path)
def download_skateline_video(download_to_path=None):
"""Download additional testing video
NOT FOR GENERAL USE; VIDEO MIGHT BE REMOVED WITHOUT WARNING
:param download_to_path: path to save video to
:return: None
"""
urlretrieve(REMOTE_SKATELINE_VID_PATH, download_to_path)
def download_truncated_ostrich_video(download_to_path=None):
"""Download additional testing video
NOT FOR GENERAL USE; VIDEO MIGHT BE REMOVED WITHOUT WARNING
:param download_to_path: path to save video to
:return: None
"""
urlretrieve(REMOTE_TRUNCATED_OSTRICH_VID_PATH, download_to_path)
``` |
{
"source": "24wuw/bunner",
"score": 4
} |
#### File: 24wuw/bunner/bunner.py
```python
import pgzero, pgzrun, pygame, sys
from random import *
from enum import Enum
# Check Python version number. sys.version_info gives the version as a tuple, e.g. (3,7,2,'final',0) for version 3.7.2.
# Unlike many languages, Python can compare two tuples in the same way that you can compare numbers.
if sys.version_info < (3,5):
print("This game requires at least version 3.5 of Python. Please download it from www.python.org")
sys.exit()
# Check Pygame Zero version. This is a bit trickier because Pygame Zero only lets us get its version number as a string.
# So we have to split the string into a list, using '.' as the character to split on. We convert each element of the
# version number into an integer - but only if the string contains numbers and nothing else, because it's possible for
# a component of the version to contain letters as well as numbers (e.g. '2.0.dev0')
# We're using a Python feature called list comprehension - this is explained in the Bubble Bobble/Cavern chapter.
pgzero_version = [int(s) if s.isnumeric() else s for s in pgzero.__version__.split('.')]
if pgzero_version < [1,2]:
print("This game requires at least version 1.2 of Pygame Zero. You have version {0}. Please upgrade using the command 'pip3 install --upgrade pgzero'".format(pgzero.__version__))
sys.exit()
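# For example (added note), a version string such as '1.2.dev0' becomes
# [1, 2, 'dev0'] after the split above; that list is not less than [1, 2],
# so a 1.2 development build still passes the check.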
WIDTH = 480
HEIGHT = 800
TITLE = "Infinite Bunner"
ROW_HEIGHT = 40
# See what happens when you change this to True
DEBUG_SHOW_ROW_BOUNDARIES = False
# The MyActor class extends Pygame Zero's Actor class by allowing an object to have a list of child objects,
# which are drawn relative to the parent object.
class MyActor(Actor):
def __init__(self, image, pos, anchor=("center", "bottom")):
super().__init__(image, pos, anchor)
self.children = []
def draw(self, offset_x, offset_y):
self.x += offset_x
self.y += offset_y
super().draw()
for child_obj in self.children:
child_obj.draw(self.x, self.y)
self.x -= offset_x
self.y -= offset_y
def update(self):
for child_obj in self.children:
child_obj.update()
# The eagle catches the rabbit if it goes off the bottom of the screen
class Eagle(MyActor):
def __init__(self, pos):
super().__init__("eagles", pos)
self.children.append(MyActor("eagle", (0, -32)))
def update(self):
self.y += 12
class PlayerState(Enum):
ALIVE = 0
SPLAT = 1
SPLASH = 2
EAGLE = 3
# Constants representing directions
DIRECTION_UP = 0
DIRECTION_RIGHT = 1
DIRECTION_DOWN = 2
DIRECTION_LEFT = 3
direction_keys = [keys.UP, keys.RIGHT, keys.DOWN, keys.LEFT]
# Per-frame X and Y movement offsets, indexed by direction number
# The indices correspond to the direction numbers above, i.e. 0 = up, 1 = right, 2 = down, 3 = left
DX = [0,4,0,-4]
DY = [-4,0,4,0]
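# For example (added note): a move in DIRECTION_RIGHT (1) shifts the position by
# (DX[1], DY[1]) = (4, 0) pixels per frame, while DIRECTION_UP (0) gives (0, -4),
# i.e. four pixels towards the top of the screen.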
class Bunner(MyActor):
MOVE_DISTANCE = 10
def __init__(self, pos):
super().__init__("blank", pos)
self.state = PlayerState.ALIVE
self.direction = 2
self.timer = 0
# If a control input is pressed while the rabbit is in the middle of jumping, it's added to the input queue
self.input_queue = []
# Keeps track of the furthest distance we've reached so far in the level, for scoring
# (Level Y coordinates decrease as the screen scrolls)
self.min_y = self.y
def handle_input(self, dir):
# Find row that player is trying to move to. This may or may not be the row they're currently standing on,
# depending on whether the proposed movement would take them onto a different row
for row in game.rows:
if row.y == self.y + Bunner.MOVE_DISTANCE * DY[dir]:
# Found the target row
# Can the player move to the new location? Can't move if there's something in the way
# (or if the new location is off the screen)
if row.allow_movement(self.x + Bunner.MOVE_DISTANCE * DX[dir]):
# It's okay to move here, so set direction and timer. Player will move one pixel per frame
# for the specified number of frames
self.direction = dir
self.timer = Bunner.MOVE_DISTANCE
game.play_sound("jump", 1)
# No need to continue searching
return
def update(self):
# Check each control direction
for direction in range(4):
if key_just_pressed(direction_keys[direction]):
self.input_queue.append(direction)
if self.state == PlayerState.ALIVE:
# While the player is alive, the timer variable is used for movement. If it's zero, the player is on
# the ground. If it's above zero, they're currently jumping to a new location.
# Are we on the ground, and are there inputs to process?
if self.timer == 0 and len(self.input_queue) > 0:
# Take the next input off the queue and process it
self.handle_input(self.input_queue.pop(0))
land = False
if self.timer > 0:
# Apply movement
self.x += DX[self.direction]
self.y += DY[self.direction]
self.timer -= 1
land = self.timer == 0 # If timer reaches zero, we've just landed
current_row = None
for row in game.rows:
if row.y == self.y:
current_row = row
break
if current_row:
# Row.check receives the player's X coordinate and returns the new state the player should be in
# (normally ALIVE, but SPLAT or SPLASH if they've collided with a vehicle or if they've fallen in
# the water). It also returns a second result which is only used if there was a collision, and even
# then only for certain collisions. When the new state is SPLAT, we will add a new child object to the
# current row, with the appropriate 'splat' image. In this case, the second result returned from
# check_collision is a Y offset which affects the position of this new child object. If the player is
# hit by a car the Y offset is zero, but if they are hit by a train the returned offset is 8 as this
# positioning looks a little better.
self.state, dead_obj_y_offset = current_row.check_collision(self.x)
if self.state == PlayerState.ALIVE:
# Water rows move the player along the X axis, if standing on a log
self.x += current_row.push()
if land:
# Just landed - play sound effect appropriate to the current row
current_row.play_sound()
else:
if self.state == PlayerState.SPLAT:
# Add 'splat' graphic to current row with the specified position and Y offset
current_row.children.insert(0, MyActor("splat" + str(self.direction), (self.x, dead_obj_y_offset)))
self.timer = 100
else:
# There's no current row - either because player is currently changing row, or the row they were on
# has been deleted. Has the player gone off the bottom of the screen?
if self.y > game.scroll_pos + HEIGHT + 80:
# Create eagle
game.eagle = Eagle((self.x, game.scroll_pos))
self.state = PlayerState.EAGLE
self.timer = 150
game.play_sound("eagle")
# Limit x position so player doesn't go off the screen. The player movement code doesn't allow jumping off
# the screen, but without this line, the player could be carried off the screen by a log
self.x = max(16, min(WIDTH - 16, self.x))
else:
# Not alive - timer now counts down prior to game over screen
self.timer -= 1
# Keep track of the furthest we've got in the level
self.min_y = min(self.min_y, self.y)
# Choose sprite image
self.image = "blank"
if self.state == PlayerState.ALIVE:
if self.timer > 0:
self.image = "jump" + str(self.direction)
else:
self.image = "sit" + str(self.direction)
elif self.state == PlayerState.SPLASH and self.timer > 84:
# Display appropriate 'splash' animation frame. Note that we use a different technique to display the
# 'splat' image – see: comments earlier in this method. The reason two different techniques are used is
# that the splash image should be drawn on top of other objects, whereas the splat image must be drawn
# underneath other objects. Since the player is always drawn on top of other objects, changing the player
# sprite is a suitable method of displaying the splash image.
self.image = "splash" + str(int((100 - self.timer) / 2))
# Mover is the base class for Car, Log and Train
# The thing they all have in common, besides inheriting from MyActor, is that they need to store whether they're
# moving left or right and update their X position each frame
class Mover(MyActor):
def __init__(self, dx, image, pos):
super().__init__(image, pos)
self.dx = dx
def update(self):
self.x += self.dx
class Car(Mover):
    # These correspond to the indices of the lists self.sounds and self.played. Used in Car.update to trigger
# playing of the corresponding sound effects.
SOUND_ZOOM = 0
SOUND_HONK = 1
def __init__(self, dx, pos):
image = "car" + str(randint(0, 3)) + ("0" if dx < 0 else "1")
super().__init__(dx, image, pos)
# Cars have two sound effects. Each can only play once. We use this
# list to keep track of which has already played.
self.played = [False, False]
self.sounds = [("zoom", 6), ("honk", 4)]
def play_sound(self, num):
if not self.played[num]:
# Select a sound and pass the name and count to Game.play_sound.
# The asterisk operator unpacks the two items and passes them to play_sound as separate arguments
game.play_sound(*self.sounds[num])
self.played[num] = True
class Log(Mover):
def __init__(self, dx, pos):
image = "log" + str(randint(0, 1))
super().__init__(dx, image, pos)
class Train(Mover):
def __init__(self, dx, pos):
image = "train" +str(randint(0, 2)) + ("0" if dx < 0 else "1")
super().__init__(dx, image, pos)
# Row is the base class for Pavement, Grass, Dirt, Rail and ActiveRow
# Each row corresponds to one of the 40 pixel high images which make up sections of grass, road, etc.
# The last row of each section is 60 pixels high and overlaps with the row above
class Row(MyActor):
def __init__(self, base_image, index, y):
# base_image and index form the name of the image file to use
# Last argument is the anchor point to use
super().__init__(base_image + str(index), (0, y), ("left", "bottom"))
self.index = index
# X direction of moving elements on this row
# Zero by default - only ActiveRows (see below) and Rail have moving elements
self.dx = 0
def next(self):
# Overridden in child classes. See comments in Game.update
return
def collide(self, x, margin=0):
# Check to see if the given X coordinate is in contact with any of this row's child objects (e.g. logs, cars,
# hedges). A negative margin makes the collideable area narrower than the child object's sprite, while a
# positive margin makes the collideable area wider.
for child_obj in self.children:
if x >= child_obj.x - (child_obj.width / 2) - margin and x < child_obj.x + (child_obj.width / 2) + margin:
return child_obj
return None
def push(self):
return 0
def check_collision(self, x):
# Returns the new state the player should be in, based on whether or not the player collided with anything on
# this road. As this class is the base class for other types of row, this method defines the default behaviour
# – i.e. unless a subclass overrides this method, the player can walk around on a row without dying.
return PlayerState.ALIVE, 0
def allow_movement(self, x):
# Ensure the player can't walk off the left or right sides of the screen
return x >= 16 and x <= WIDTH-16
class ActiveRow(Row):
def __init__(self, child_type, dxs, base_image, index, y):
super().__init__(base_image, index, y)
self.child_type = child_type # Class to be used for child objects (e.g. Car)
self.timer = 0
self.dx = choice(dxs) # Randomly choose a direction for cars/logs to move
# Populate the row with child objects (cars or logs). Without this, the row would initially be empty.
x = -WIDTH / 2 - 70
while x < WIDTH / 2 + 70:
x += randint(240, 480)
pos = (WIDTH / 2 + (x if self.dx > 0 else -x), 0)
self.children.append(self.child_type(self.dx, pos))
def update(self):
super().update()
# Recreate the children list, excluding any which are too far off the edge of the screen to be visible
self.children = [c for c in self.children if c.x > -70 and c.x < WIDTH + 70]
self.timer -= 1
# Create new child objects on a random interval
if self.timer < 0:
pos = (WIDTH + 70 if self.dx < 0 else -70, 0)
self.children.append(self.child_type(self.dx, pos))
# 240 is minimum distance between the start of one child object and the start of the next, assuming its
# speed is 1. If the speed is 2, they can occur twice as frequently without risk of overlapping with
# each other. The maximum distance is double the minimum distance (1 + random value of 1)
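# For example, if abs(self.dx) is 2, the timer below is set to a value between 120 and 240 frames,
# so new child objects appear roughly twice as often as they would at a speed of 1 (240 to 480 frames).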
self.timer = (1 + random()) * (240 / abs(self.dx))
# Grass rows sometimes contain hedges
class Hedge(MyActor):
def __init__(self, x, y, pos):
super().__init__("bush"+str(x)+str(y), pos)
def generate_hedge_mask():
# In this context, a mask is a series of boolean values which allow or prevent parts of an underlying image from showing through.
# This function creates a mask representing the presence or absence of hedges in a Grass row. False means a hedge
# is present, True represents a gap. Initially we create a list of 12 elements. For each element there is a small
# chance of a gap, but normally all elements will be False, representing a hedge. We then randomly set one item to
# True, to ensure that there is always at least one gap that the player can get through
mask = [random() < 0.01 for i in range(12)]
mask[randint(0, 11)] = True # force there to be one gap
# We then widen gaps to a minimum of 3 tiles. This happens in two steps.
# First, we recreate the mask list, except this time whether a gap is present is based on whether there was a gap
# in either the original element or its neighbouring elements. When using Python's built-in sum function, a value
# of True is treated as 1 and False as 0. We must use the min/max functions to ensure that we don't try to look
# at a neighbouring element which doesn't exist (e.g. there is no neighbour to the right of the last element)
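# For example, if the initial mask has a gap only at index 5, i.e. [F,F,F,F,F,T,F,F,F,F,F,F],
# the line below widens it to [F,F,F,F,T,T,T,F,F,F,F,F] - a gap three tiles wide.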
mask = [sum(mask[max(0, i-1):min(12, i+2)]) > 0 for i in range(12)]
# We want to ensure gaps are a minimum of 3 tiles wide, but the previous line only ensures a minimum gap of 2 tiles
# at the edges. The last step is to return a new list consisting of the old list with the first element duplicated once at the start and the last element duplicated twice at the end, giving 15 elements in total
return [mask[0]] + mask + 2 * [mask[-1]]
def classify_hedge_segment(mask, previous_mid_segment):
# This function helps determine which sprite should be used by a particular hedge segment. Hedge sprites are numbered
# 00, 01, 10, 11, 20, 21 - up to 51. The second number indicates whether it's a bottom (0) or top (1) segment,
# but this method is concerned only with the first number. 0 represents a single-tile-width hedge. 1 and 2 represent
# the left-most or right-most sprites in a multi-tile-width hedge. 3, 4 and 5 all represent middle pieces in hedges
# which are 3 or more tiles wide.
# mask is a list of 4 boolean values - a slice from the list generated by generate_hedge_mask. True represents a gap
# and False represents a hedge. mask[1] is the item we're currently looking at.
if mask[1]:
# mask[1] == True represents a gap, so there will be no hedge sprite at this location
sprite_x = None
else:
# There's a hedge here - need to check either side of it to see if it's a single-width, left-most, right-most
# or middle piece. The calculation generates a number from 0 to 3 accordingly. Note that when boolean values
# are used in arithmetic in Python, False is treated as being 0 and True as 1.
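# For example, with hedges on both sides (mask[0] and mask[2] both False) the result is 3 - 0 - 0 = 3,
# a middle piece; gaps on both sides give 3 - 2 - 1 = 0, a single-width hedge; a gap only on the left
# gives 1 (left-most piece) and a gap only on the right gives 2 (right-most piece).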
sprite_x = 3 - 2 * mask[0] - mask[2]
if sprite_x == 3:
# If this is a middle piece, to ensure the piece tiles correctly, we alternate between sprites 3 and 4.
# If the next piece is going to be the last of this hedge section (sprite 2), we need to make sure that sprite 3
# does not precede it, as the two do not tile together correctly. In this case we should use sprite 5.
# mask[3] tells us whether there's a gap 2 tiles to the right - which means the next tile will be sprite 2
if previous_mid_segment == 4 and mask[3]:
return 5, None
else:
# Alternate between 3 and 4
if previous_mid_segment == None or previous_mid_segment == 4:
sprite_x = 3
elif previous_mid_segment == 3:
sprite_x = 4
return sprite_x, sprite_x
else:
# Not a middle piece
return sprite_x, None
class Grass(Row):
def __init__(self, predecessor, index, y):
super().__init__("grass", index, y)
# In computer graphics, a mask is a series of boolean (true or false) values indicating which parts of an image
# will be transparent. Grass rows may contain hedges which block the player's movement, and we use a similar
# mechanism here. In our hedge mask, values of False mean a hedge is present, while True means there is a gap
# in the hedges. Hedges are two rows high - once hedges have been created on a row, the pattern will be
# duplicated on the next row (although the sprites will be different - e.g. there are separate sprites
# for the top-left and bottom-left corners of a hedge). Note that the upper sprites overlap with the row above.
self.hedge_row_index = None # 0 or 1, or None if no hedges on this row
self.hedge_mask = None
if not isinstance(predecessor, Grass) or predecessor.hedge_row_index == None:
# Create a brand-new set of hedges? We will only create hedges if the previous row didn't have any.
# We also only want hedges to appear on certain types of grass row, and on only a random selection
# of rows
if random() < 0.5 and index > 7 and index < 14:
self.hedge_mask = generate_hedge_mask()
self.hedge_row_index = 0
elif predecessor.hedge_row_index == 0:
self.hedge_mask = predecessor.hedge_mask
self.hedge_row_index = 1
if self.hedge_row_index != None:
# See comments in classify_hedge_segment for explanation of previous_mid_segment
previous_mid_segment = None
for i in range(1, 13):
sprite_x, previous_mid_segment = classify_hedge_segment(self.hedge_mask[i - 1:i + 3], previous_mid_segment)
if sprite_x != None:
self.children.append(Hedge(sprite_x, self.hedge_row_index, (i * 40 - 20, 0)))
def allow_movement(self, x):
# allow_movement in the base class ensures that the player can't walk off the left and right sides of the
# screen. The call to our own collide method ensures that the player can't walk through hedges. The margin of
# 8 prevents the player sprite from overlapping with the edge of a hedge.
return super().allow_movement(x) and not self.collide(x, 8)
def play_sound(self):
game.play_sound("grass", 1)
def next(self):
if self.index <= 5:
row_class, index = Grass, self.index + 8
elif self.index == 6:
row_class, index = Grass, 7
elif self.index == 7:
row_class, index = Grass, 15
elif self.index >= 8 and self.index <= 14:
row_class, index = Grass, self.index + 1
else:
row_class, index = choice((Road, Water)), 0
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Dirt(Row):
def __init__(self, predecessor, index, y):
super().__init__("dirt", index, y)
def play_sound(self):
game.play_sound("dirt", 1)
def next(self):
if self.index <= 5:
row_class, index = Dirt, self.index + 8
elif self.index == 6:
row_class, index = Dirt, 7
elif self.index == 7:
row_class, index = Dirt, 15
elif self.index >= 8 and self.index <= 14:
row_class, index = Dirt, self.index + 1
else:
row_class, index = choice((Road, Water)), 0
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Water(ActiveRow):
def __init__(self, predecessor, index, y):
# dxs contains a list of possible directions (and speeds) in which child objects (in this case, logs) on this
# row could move. We pass the lists to the constructor of the base class, which randomly chooses one of the
# directions. We want logs on alternate rows to move in opposite directions, so we take advantage of the fact
# that in Python, multiplying a list by True or False results in either the same list, or an empty list.
# So by looking at the direction of child objects on the previous row (predecessor.dx), we can decide whether
# child objects on this row should move left or right. If this is the first of a series of Water rows,
# predecessor.dx will be zero, so child objects could move in either direction.
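# For example, if predecessor.dx is 2, dxs will be [-2, -1]; if predecessor.dx is 0 (the previous row
# was not a water row), dxs will be [-2, -1, 1, 2].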
dxs = [-2,-1]*(predecessor.dx >= 0) + [1,2]*(predecessor.dx <= 0)
super().__init__(Log, dxs, "water", index, y)
def update(self):
super().update()
for log in self.children:
# Child (log) object positions are relative to the parent row. If the player exists, and the player is at the
# same Y position, and is colliding with the current log, make the log dip down into the water slightly
if game.bunner and self.y == game.bunner.y and log == self.collide(game.bunner.x, -4):
log.y = 2
else:
log.y = 0
def push(self):
# Called when the player is standing on a log on this row, so player object can be moved at the same speed and
# in the same direction as the log
return self.dx
def check_collision(self, x):
# If we're colliding with a log, that's a good thing!
# margin of -4 ensures we can't stand right on the edge of a log
if self.collide(x, -4):
return PlayerState.ALIVE, 0
else:
game.play_sound("splash")
return PlayerState.SPLASH, 0
def play_sound(self):
game.play_sound("log", 1)
def next(self):
# After 2 water rows, there's a 50-50 chance of the next row being either another water row, or a dirt row
if self.index == 7 or (self.index >= 1 and random() < 0.5):
row_class, index = Dirt, randint(4,6)
else:
row_class, index = Water, self.index + 1
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Road(ActiveRow):
def __init__(self, predecessor, index, y):
# Specify the possible directions and speeds from which the movement of cars on this row will be chosen
# We use Python's set data structure to specify that the car velocities on this row will be any of the numbers
# from -5 to 5, except for zero or the velocity of the cars on the previous row
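# For example, if predecessor.dx is 3, dxs will contain -5, -4, -3, -2, -1, 1, 2, 4 and 5
# (in no particular order, since sets are unordered).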
dxs = list(set(range(-5, 6)) - set([0, predecessor.dx]))
super().__init__(Car, dxs, "road", index, y)
def update(self):
super().update()
# Trigger car sound effects. The zoom effect should play when the player is on the row above or below the car,
# the honk effect should play when the player is on the same row.
for y_offset, car_sound_num in [(-ROW_HEIGHT, Car.SOUND_ZOOM), (0, Car.SOUND_HONK), (ROW_HEIGHT, Car.SOUND_ZOOM)]:
# Is the player on the appropriate row?
if game.bunner and game.bunner.y == self.y + y_offset:
for child_obj in self.children:
# The child object must be a car
if isinstance(child_obj, Car):
# The car must be within 100 pixels of the player on the x-axis, and moving towards the player
# child_obj.dx < 0 is True or False depending on whether the car is moving left or right, and
# dx < 0 is True or False depending on whether the player is to the left or right of the car.
# If the results of these two comparisons are different, the car is moving towards the player.
# Also, for the zoom sound, the car must be travelling faster than one pixel per frame
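# For example, a car at x=400 moving left with the player at x=350 gives dx = 50, so
# (child_obj.dx < 0) is True while (dx < 0) is False; the results differ, meaning the car is
# heading towards the player.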
dx = child_obj.x - game.bunner.x
if abs(dx) < 100 and ((child_obj.dx < 0) != (dx < 0)) and (y_offset == 0 or abs(child_obj.dx) > 1):
child_obj.play_sound(car_sound_num)
def check_collision(self, x):
if self.collide(x):
game.play_sound("splat", 1)
return PlayerState.SPLAT, 0
else:
return PlayerState.ALIVE, 0
def play_sound(self):
game.play_sound("road", 1)
def next(self):
if self.index == 0:
row_class, index = Road, 1
elif self.index < 5:
# 80% chance of another road
r = random()
if r < 0.8:
row_class, index = Road, self.index + 1
elif r < 0.88:
row_class, index = Grass, randint(0,6)
elif r < 0.94:
row_class, index = Rail, 0
else:
row_class, index = Pavement, 0
else:
# We've reached maximum of 5 roads in a row, so choose something else
r = random()
if r < 0.6:
row_class, index = Grass, randint(0,6)
elif r < 0.9:
row_class, index = Rail, 0
else:
row_class, index = Pavement, 0
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Pavement(Row):
def __init__(self, predecessor, index, y):
super().__init__("side", index, y)
def play_sound(self):
game.play_sound("sidewalk", 1)
def next(self):
if self.index < 2:
row_class, index = Pavement, self.index + 1
else:
row_class, index = Road, 0
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
# Note that Rail does not inherit from ActiveRow
class Rail(Row):
def __init__(self, predecessor, index, y):
super().__init__("rail", index, y)
self.predecessor = predecessor
def update(self):
super().update()
# Only Rail rows with index 1 have trains on them
if self.index == 1:
# Recreate the children list, excluding any which are too far off the edge of the screen to be visible
self.children = [c for c in self.children if c.x > -1000 and c.x < WIDTH + 1000]
# If on-screen, and there is currently no train, and with a 1% chance every frame, create a train
if self.y < game.scroll_pos+HEIGHT and len(self.children) == 0 and random() < 0.01:
# Randomly choose a direction for trains to move. This can be different for each train created
dx = choice([-20, 20])
self.children.append(Train(dx, (WIDTH + 1000 if dx < 0 else -1000, -13)))
game.play_sound("bell")
game.play_sound("train", 2)
def check_collision(self, x):
if self.index == 2 and self.predecessor.collide(x):
game.play_sound("splat", 1)
return PlayerState.SPLAT, 8 # For the meaning of the second return value, see comments in Bunner.update
else:
return PlayerState.ALIVE, 0
def play_sound(self):
game.play_sound("grass", 1)
def next(self):
if self.index < 3:
row_class, index = Rail, self.index + 1
else:
item = choice( ((Road, 0), (Water, 0)) )
row_class, index = item[0], item[1]
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Game:
def __init__(self, bunner=None):
self.bunner = bunner
self.looped_sounds = {}
try:
if bunner:
music.set_volume(0.4)
else:
music.play("theme")
music.set_volume(1)
except:
pass
self.eagle = None
self.frame = 0
# First (bottom) row is always grass
self.rows = [Grass(None, 0, 0)]
self.scroll_pos = -HEIGHT
def update(self):
if self.bunner:
# Scroll faster if the player is close to the top of the screen. Limit scroll speed to
# between 1 and 3 pixels per frame.
self.scroll_pos -= max(1, min(3, float(self.scroll_pos + HEIGHT - self.bunner.y) / (HEIGHT // 4)))
else:
self.scroll_pos -= 1
# Recreate the list of rows, excluding any which have scrolled off the bottom of the screen
self.rows = [row for row in self.rows if row.y < int(self.scroll_pos) + HEIGHT + ROW_HEIGHT * 2]
# In Python, a negative index into a list gives you items in reverse order, e.g. my_list[-1] gives you the
# last element of a list. Here, we look at the last row in the list - which is the top row - and check to see
# if it has scrolled sufficiently far down that we need to add a new row above it. This may need to be done
# multiple times - particularly when the game starts, as only one row is added to begin with.
while self.rows[-1].y > int(self.scroll_pos)+ROW_HEIGHT:
new_row = self.rows[-1].next()
self.rows.append(new_row)
# Update all rows, and the player and eagle (if present)
for obj in self.rows + [self.bunner, self.eagle]:
if obj:
obj.update()
# Play river and traffic sound effects, and adjust volume each frame based on the player's proximity to rows
# of the appropriate types. For each such row, a number is generated representing how much the row should
# contribute to the volume of the sound effect. These numbers are added together by Python's sum function.
# On the following line we ensure that the volume can never be above 40% of the maximum possible volume.
if self.bunner:
for name, count, row_class in [("river", 2, Water), ("traffic", 3, Road)]:
# The first line uses a list comprehension to get each row of the appropriate type, e.g. Water rows
# if we're currently updating the "river" sound effect.
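# For example, a Water row 32 pixels from the player contributes 16/32 = 0.5 to the 'river' volume,
# a row 320 pixels away contributes only 0.05, and any row closer than 16 pixels contributes 1.0.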
volume = sum([16.0 / max(16.0, abs(r.y - self.bunner.y)) for r in self.rows if isinstance(r, row_class)]) - 0.2
volume = min(0.4, volume)
self.loop_sound(name, count, volume)
return self
def draw(self):
# Create a list of all objects which need to be drawn. This includes all rows, plus the player
# Using list(self.rows) means we're creating a copy of that list to use - we don't want to create a reference
# to it as that would mean we're modifying the original list's contents
all_objs = list(self.rows)
if self.bunner:
all_objs.append(self.bunner)
# We want to draw objects in order based on their Y position. In general, objects further down the screen should be drawn
# after (and therefore in front of) objects higher up the screen. We can use Python's built-in sort function
# to put the items in the desired order before we draw them. The following function specifies the criteria
# used to decide how the objects are sorted.
def sort_key(obj):
# Adding 39 and then doing an integer divide by 40 (the height of each row) deals with the situation where
# the player sprite would otherwise be drawn underneath the row below. This could happen when the player
# is moving up or down. If you assume that it occupies a 40x40 box which can be at an arbitrary y offset,
# it generates the row number of the bottom row that that box overlaps. If the player happens to be
# perfectly aligned to a row, adding 39 and dividing by 40 has no effect on the result. If it isn't, even
# by a single pixel, the +39 causes it to be drawn one row later.
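# For example, a player at y=365 gives (365 + 39) // 40 = 10, the same sort key as a row at y=400,
# whereas a perfectly aligned player at y=400 gives (400 + 39) // 40 = 10, identical to 400 // 40.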
return (obj.y + 39) // ROW_HEIGHT
# Sort list using the above function to determine order
all_objs.sort(key=sort_key)
# Always draw eagle on top of everything
all_objs.append(self.eagle)
for obj in all_objs:
if obj:
# Draw the object, taking the scroll position into account
obj.draw(0, -int(self.scroll_pos))
if DEBUG_SHOW_ROW_BOUNDARIES:
for obj in all_objs:
if obj and isinstance(obj, Row):
pygame.draw.rect(screen.surface, (255, 255, 255), pygame.Rect(obj.x, obj.y - int(self.scroll_pos), screen.surface.get_width(), ROW_HEIGHT), 1)
screen.draw.text(str(obj.index), (obj.x, obj.y - int(self.scroll_pos) - ROW_HEIGHT))
def score(self):
return int(-320 - game.bunner.min_y) // 40
def play_sound(self, name, count=1):
try:
# Some sounds have multiple varieties. If count > 1, we'll randomly choose one from those
# We don't play any sounds if there is no player (e.g. if we're on the menu)
if self.bunner:
# Pygame Zero allows you to write things like 'sounds.explosion.play()'
# This automatically loads and plays a file named 'explosion.wav' (or .ogg) from the sounds folder (if
# such a file exists)
# But what if you have files named 'explosion0.ogg' to 'explosion5.ogg' and want to randomly choose
# one of them to play? You can generate a string such as 'explosion3', but to use such a string
# to access an attribute of Pygame Zero's sounds object, we must use Python's built-in function getattr
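# For example, getattr(sounds, "splash0") is equivalent to writing sounds.splash0 directly.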
sound = getattr(sounds, name + str(randint(0, count - 1)))
sound.play()
except:
# If a sound fails to play, ignore the error
pass
def loop_sound(self, name, count, volume):
try:
# Similar to play_sound above, but for looped sounds we need to keep a reference to the sound so that we can
# later modify its volume or turn it off. We use the dictionary self.looped_sounds for this - the sound
# effect name is the key, and the value is the corresponding sound reference.
if volume > 0 and not name in self.looped_sounds:
full_name = name + str(randint(0, count - 1))
sound = getattr(sounds, full_name) # see play_sound method above for explanation
sound.play(-1) # -1 means sound will loop indefinitely
self.looped_sounds[name] = sound
if name in self.looped_sounds:
sound = self.looped_sounds[name]
if volume > 0:
sound.set_volume(volume)
else:
sound.stop()
del self.looped_sounds[name]
except:
# If a sound fails to play, ignore the error
pass
def stop_looped_sounds(self):
try:
for sound in self.looped_sounds.values():
sound.stop()
self.looped_sounds.clear()
except:
# If sound system is not working/present, ignore the error
pass
# Dictionary to keep track of which keys are currently being held down
key_status = {}
# Was the given key just pressed? (i.e. is it currently down, but wasn't down on the previous frame?)
def key_just_pressed(key):
result = False
# Get key's previous status from the key_status dictionary. The dictionary.get method allows us to check for a given
# entry without giving an error if that entry is not present in the dictionary. False is the default value returned
# when the key is not present.
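# For example, the first time a key is checked it will not yet be in the dictionary, so
# key_status.get(key, False) returns the default value of False rather than raising a KeyError.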
prev_status = key_status.get(key, False)
# If the key wasn't previously being pressed, but it is now, we're going to return True
if not prev_status and keyboard[key]:
result = True
# Before we return, we need to update the key's entry in the key_status dictionary (or create an entry if there
# wasn't one already)
key_status[key] = keyboard[key]
return result
def display_number(n, colour, x, align):
# align: 0 for left, 1 for right
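# For example, with align=1 and a three-digit number, the digits are drawn at x-75, x-50 and x-25,
# spaced 25 pixels apart, so the number ends at x (right-aligned); with align=0 it starts at x.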
n = str(n) # Convert number to string
for i in range(len(n)):
screen.blit("digit" + str(colour) + n[i], (x + (i - len(n) * align) * 25, 0))
# Pygame Zero calls the update and draw functions each frame
class State(Enum):
MENU = 1
PLAY = 2
GAME_OVER = 3
def update():
global state, game, high_score
if state == State.MENU:
if key_just_pressed(keys.SPACE):
state = State.PLAY
game = Game(Bunner((240, -320)))
else:
game.update()
elif state == State.PLAY:
# Is it game over?
if game.bunner.state != PlayerState.ALIVE and game.bunner.timer < 0:
# Update high score
high_score = max(high_score, game.score())
# Write high score file
try:
with open("high.txt", "w") as file:
file.write(str(high_score))
except:
# If an error occurs writing the file, just ignore it and carry on, rather than crashing
pass
state = State.GAME_OVER
else:
game.update()
elif state == State.GAME_OVER:
# Switch to menu state, and create a new game object without a player
if key_just_pressed(keys.SPACE):
game.stop_looped_sounds()
state = State.MENU
game = Game()
def draw():
game.draw()
if state == State.MENU:
screen.blit("title", (0, 0))
screen.blit("start" + str([0, 1, 2, 1][game.scroll_pos // 6 % 4]), ((WIDTH - 270) // 2, HEIGHT - 240))
elif state == State.PLAY:
# Display score and high score
display_number(game.score(), 0, 0, 0)
display_number(high_score, 1, WIDTH - 10, 1)
elif state == State.GAME_OVER:
# Display "Game Over" image
screen.blit("gameover", (0, 0))
# Set up sound system
try:
pygame.mixer.quit()
pygame.mixer.init(44100, -16, 2, 512)
pygame.mixer.set_num_channels(16)
except:
# If an error occurs, just ignore it
pass
# Load high score from file
try:
with open("high.txt", "r") as f:
high_score = int(f.read())
except:
# If opening the file fails (likely because it hasn't yet been created), set high score to 0
high_score = 0
# Set the initial game state
state = State.MENU
# Create a new Game object, without a Player object
game = Game()
pgzrun.go()
``` |
{
"source": "2502302255/al789",
"score": 3
} |
#### File: 2502302255/al789/example_02.py
```python
from main.urllib_ps import pagesource
from main.remove_control_characters import remove_control_characters
import config
import re
from xml.sax.saxutils import unescape
import webapp2
siteurl = 'http://www.example.com/rss.xml'
rssname = ''
def filter():
result = pagesource(siteurl)
# result = remove_control_characters(result)
# result = re.sub('', '', result)
# result = re.sub('', '', result)
# result = re.sub('<(/|)div.*?>', '', result)
return result
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/xml'
self.response.out.write(filter())
app = webapp2.WSGIApplication([('/' + config.subdir4rss + '/' + rssname, MainPage)], debug=True)
```
#### File: al789/main/Entdecker.py
```python
__author__ = '<NAME>, <EMAIL>'
import chardet
import urlfetch_ps
import urllib_ps
import re
import random
import time
import urlparse
'''''''''''''''
Fetch the article body and comments
'''''''''''''''
def ausfuehren(lib_or_fetch, artikelurl, SeiteQuelle, reg4nextpage, reg4text, reg4comment, vtext, comments):
# No comments
if len(reg4comment) == 0: pass
# Comments present
else:
cmt_tmp = re.findall(reg4comment, SeiteQuelle.replace(r'"', r'"').replace('\\', ''))
if len(cmt_tmp) != 0:
# Join the comments into a single string, whether or not the comment regex contains capture groups
# for i in cmt_tmp:
# comments_tmp = ["".join(i) for i in cmt_tmp]
comments_tmp = ["".join(i) for i in cmt_tmp]
comments = "".join(comments_tmp)
else:
comments = '<br/><p style="color:#1890cd;font-size:25px;">No comments/gallery</p>'
# print comments
# No pagination
if len(reg4nextpage) == 0: pass
# Pagination present
else:
# Check whether the body regex result contains only a single group
if isinstance(re.findall(reg4text, SeiteQuelle)[0], tuple) is False:
while re.search(reg4nextpage, SeiteQuelle):
artikelurl_temp = re.findall(reg4nextpage, SeiteQuelle)[0]
if re.search(r'://', artikelurl_temp):
artikelurl = artikelurl_temp
else:
artikelurl = urlparse.urljoin(artikelurl, artikelurl_temp)
SeiteQuelle = lib_or_fetch(artikelurl)
Content = re.findall(reg4text, SeiteQuelle)
if len(Content) != 0:
vtext.append(Content[0]) # decode('utf-8') removed
elif re.search(r'ERROR! QwQ', SeiteQuelle):
vtext.append('<span style="color:#ff0000;font-size:30px;">' + SeiteQuelle + '</span>')
else:
vtext.append('<span style="color:#ff0000;font-size:30px;">Please check your regex</span>')
time.sleep(float(random.sample(range(1, 6), 1)[0]))
else:
while re.search(reg4nextpage, SeiteQuelle):
artikelurl_temp = re.findall(reg4nextpage, SeiteQuelle)[0]
if re.search(r'://', artikelurl_temp):
artikelurl = artikelurl_temp
else:
artikelurl = urlparse.urljoin(artikelurl, artikelurl_temp)
SeiteQuelle = lib_or_fetch(artikelurl)
Content = re.findall(reg4text, SeiteQuelle)
if len(Content) != 0:
text_temp = Content
for e in text_temp:
vtext.append("".join(e)) # decode('utf-8') removed
elif re.search(r'ERROR! QwQ', SeiteQuelle):
vtext.append('<span style="color:#ff0000;font-size:30px;">' + SeiteQuelle + '</span>')
else:
vtext.append('<span style="color:#ff0000;font-size:30px;">Please check your regex</span>')
time.sleep(float(random.sample(range(1, 6), 1)[0]))
return vtext, comments
```
#### File: al789/main/feed_fulltext.py
```python
__author__ = '<NAME>, <EMAIL>'
import urlfetch_ps
import urllib_ps
import Entdecker
from remove_control_characters import remove_control_characters
import chardet
import random
import re
import urlparse
import threading
import time
from lxml import etree
from lxml.etree import CDATA
from xml.sax.saxutils import unescape
ISOTIMEFORMAT = '%a, %e %b %Y %H:%M:%S %z'
''''''''''''''''''''''''''''''
'''Fetch full text from a feed, multi-threaded'''
''''''''''''''''''''''''''''''
def ausfuehren(lib_or_fetch, siteurl, reg4nextpage, reg4text, reg4comment, Anzahl, *custom_para):
if lib_or_fetch == 'use_urlfetch':
lib_or_fetch = urlfetch_ps.pagesource
else:
lib_or_fetch = urllib_ps.pagesource
rss = lib_or_fetch(siteurl[0])
if re.search(r'ERROR! QwQ', rss):
f = open('main/Vorlage_Error.xml')
root = etree.XML(f.read(), etree.XMLParser(remove_blank_text=True))
f.close()
root.xpath('/rss/channel/title')[0].text = siteurl[0]
root.xpath('/rss/channel/link')[0].text = siteurl[0]
root.xpath('/rss/channel/description')[0].text = siteurl[0]
root.xpath('/rss/channel/lastBuildDate')[0].text = time.strftime(ISOTIMEFORMAT, time.localtime(time.time()))
root.xpath('//item[1]/description')[0].text = rss
root.xpath('//item[1]/link')[0].text = siteurl[0]
root.xpath('//item[1]/guid')[0].text = siteurl[0]
root.xpath('//item[1]/pubDate')[0].text = time.strftime(ISOTIMEFORMAT, time.localtime(time.time()))
return etree.tostring(root, pretty_print=True, xml_declaration=True, encoding="UTF-8")
else:
root = etree.XML(rss, etree.XMLParser(recover=True))
l = len(root.xpath('//item'))
def rssmodi(lib_or_fetch, reg4nextpage, reg4text, reg4comment, root):
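# Note: lxml's xpath() supports XPath variables, so the $i in the expressions below is bound
# to the keyword argument i=Nummer rather than being part of the XPath string itself.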
artikelurl = root.xpath('//item[$i]/link', i = Nummer)[0].text
artikelurl = re.sub('/\?utm_source=RSS.+', '', artikelurl)
artikelurl = re.sub('/\?ncid=rss_truncated', '', artikelurl)
# Replace the original description with the fetched full text, joining each group matched by the body regex into one string
SeiteQuelle = lib_or_fetch(artikelurl)
encoding = chardet.detect(SeiteQuelle)['encoding']
# Check whether the regex is valid
if len(re.findall(reg4text, SeiteQuelle)) != 0:
# Check whether the body regex result contains only a single group
txt_tmp = re.findall(reg4text, SeiteQuelle)
if isinstance(txt_tmp[0], tuple) is True:
vtext = []
for g in txt_tmp:
vtext.append("".join(g)) # decode('utf-8') removed
else:
vtext = [txt_tmp[0]] # decode('utf-8') removed
comments = ''
Ergebnis = Entdecker.ausfuehren(lib_or_fetch, artikelurl, SeiteQuelle, reg4nextpage, reg4text, reg4comment, vtext, comments)
Haupttext = "".join(Ergebnis[0]).replace(r'<![CDATA\[', '').replace(r']]>', '')
# print comments
if len(reg4comment) == 0:
try:
root.xpath('//item[$i]/description', i = Nummer)[0].text = CDATA(Haupttext)
except ValueError:
if len(custom_para) == 1:
root.xpath('//item[$i]/description', i = Nummer)[0].text = CDATA(remove_control_characters(Haupttext.decode(custom_para[0])))
else:
root.xpath('//item[$i]/description', i = Nummer)[0].text = CDATA(remove_control_characters(Haupttext.decode(encoding, 'replace')))
else:
try:
root.xpath('//item[$i]/description', i = Nummer)[0].text = CDATA(Haupttext + Ergebnis[1])
except ValueError:
if len(custom_para) == 1:
root.xpath('//item[$i]/description', i = Nummer)[0].text = CDATA(remove_control_characters(Haupttext.decode(custom_para[0]) + Ergebnis[1].decode(custom_para[0])))
else:
root.xpath('//item[$i]/description', i = Nummer)[0].text = CDATA(remove_control_characters(Haupttext.decode(encoding, 'replace') + Ergebnis[1].decode(encoding, 'replace')))
elif re.search(r'ERROR! QwQ', SeiteQuelle):
root.xpath('//item[$i]/description', i = Nummer)[0].text = '<span style="color:rgb(255,0,0);font-size:30px;">' + SeiteQuelle + '</span>' # decode('utf-8') removed; the error message may need decode added
# Regex invalid
else:
root.xpath('//item[$i]/description', i = Nummer)[0].text = '<span style="color:rgb(255,0,0);font-size:30px;">Error: Please check regex</span>'
# Limit the number of articles fetched
if Anzahl == 0 or Anzahl >= l:
for Nummer in random.sample(range(1, l+1), l):
t = threading.Thread(target=rssmodi, args=(lib_or_fetch, reg4nextpage, reg4text, reg4comment, root))
t.start()
t.join()
time.sleep(float(random.sample(range(1, 6), 1)[0]))
elif Anzahl < l:
for x in range(Anzahl+1, l+1)[::-1]:
item = root.xpath('//item[$i]', i = x)[0]
item.getparent().remove(item)
for Nummer in random.sample(range(1, Anzahl+1), Anzahl):
t = threading.Thread(target=rssmodi, args=(lib_or_fetch, reg4nextpage, reg4text, reg4comment, root))
t.start()
t.join()
time.sleep(float(random.sample(range(1, 6), 1)[0]))
# Format the output so that it conforms to the XML spec
result = re.sub('<title>(?!<\!\[CDATA\[)', r'<title><![CDATA[', unescape(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding='UTF-8')))
result = re.sub('(?<!\]\]>)</title>', r']]></title>', result)
result = re.sub('<description>(?!<\!\[CDATA\[)', r'<description><![CDATA[', result)
result = re.sub('(?<!\]\]>)</description>', r']]></description>', result)
result = re.sub('<category>(?!<\!\[CDATA\[)', r'<category><![CDATA[', result)
result = re.sub('(?<!\]\]>)</category>', r']]></category>', result)
return re.sub(r'(<|<|<)(/|)(body|html)(>|>|>)', '', result)
``` |