{
"source": "jotheshjolly/H-19",
"score": 3
}
#### File: jotheshjolly/H-19/player.py
```python
import pygame
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 576
# Define some colors
BLACK = (0,0,0)
WHITE = (255,255,255)
class Player(pygame.sprite.Sprite):
change_x = 0
change_y = 0
explosion = False
game_over = False
def __init__(self,x,y,filename):
# Call the parent class (sprite) constructor
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(filename).convert()
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.rect.topleft = (x,y)
        # Load the sprite sheet used for the walking animation
        img = pygame.image.load("walk.png").convert()
        # Create the animation objects
self.move_right_animation = Animation(img,32,32)
self.move_left_animation = Animation(pygame.transform.flip(img,True,False),32,32)
self.move_up_animation = Animation(pygame.transform.rotate(img,90),32,32)
self.move_down_animation = Animation(pygame.transform.rotate(img,270),32,32)
# Load explosion image
img = pygame.image.load("explosion.png").convert()
self.explosion_animation = Animation(img,30,30)
# Save the player image
self.player_image = pygame.image.load(filename).convert()
self.player_image.set_colorkey(BLACK)
def update(self,horizontal_blocks,vertical_blocks):
if not self.explosion:
if self.rect.right < 0:
self.rect.left = SCREEN_WIDTH
elif self.rect.left > SCREEN_WIDTH:
self.rect.right = 0
if self.rect.bottom < 0:
self.rect.top = SCREEN_HEIGHT
elif self.rect.top > SCREEN_HEIGHT:
self.rect.bottom = 0
self.rect.x += self.change_x
self.rect.y += self.change_y
            # Stop the player from cutting through blocks: snap to the block's centre and zero that axis of movement
for block in pygame.sprite.spritecollide(self,horizontal_blocks,False):
self.rect.centery = block.rect.centery
self.change_y = 0
for block in pygame.sprite.spritecollide(self,vertical_blocks,False):
self.rect.centerx = block.rect.centerx
self.change_x = 0
# This will cause the animation to start
if self.change_x > 0:
self.move_right_animation.update(10)
self.image = self.move_right_animation.get_current_image()
elif self.change_x < 0:
self.move_left_animation.update(10)
self.image = self.move_left_animation.get_current_image()
if self.change_y > 0:
self.move_down_animation.update(10)
self.image = self.move_down_animation.get_current_image()
elif self.change_y < 0:
self.move_up_animation.update(10)
self.image = self.move_up_animation.get_current_image()
else:
if self.explosion_animation.index == self.explosion_animation.get_length() -1:
pygame.time.wait(500)
self.game_over = True
self.explosion_animation.update(12)
self.image = self.explosion_animation.get_current_image()
def move_right(self):
self.change_x = 3
def move_left(self):
self.change_x = -3
def move_up(self):
self.change_y = -3
def move_down(self):
self.change_y = 3
def stop_move_right(self):
if self.change_x != 0:
self.image = self.player_image
self.change_x = 0
def stop_move_left(self):
if self.change_x != 0:
self.image = pygame.transform.flip(self.player_image,True,False)
self.change_x = 0
def stop_move_up(self):
if self.change_y != 0:
self.image = pygame.transform.rotate(self.player_image,90)
self.change_y = 0
def stop_move_down(self):
if self.change_y != 0:
self.image = pygame.transform.rotate(self.player_image,270)
self.change_y = 0
class Animation(object):
def __init__(self,img,width,height):
# Load the sprite sheet
self.sprite_sheet = img
# Create a list to store the images
self.image_list = []
self.load_images(width,height)
# Create a variable which will hold the current image of the list
self.index = 0
# Create a variable that will hold the time
self.clock = 1
def load_images(self,width,height):
# Go through every single image in the sprite sheet
for y in range(0,self.sprite_sheet.get_height(),height):
for x in range(0,self.sprite_sheet.get_width(),width):
# load images into a list
img = self.get_image(x,y,width,height)
self.image_list.append(img)
def get_image(self,x,y,width,height):
# Create a new blank image
image = pygame.Surface([width,height]).convert()
# Copy the sprite from the large sheet onto the smaller
image.blit(self.sprite_sheet,(0,0),(x,y,width,height))
# Assuming black works as the transparent color
image.set_colorkey((0,0,0))
# Return the image
return image
def get_current_image(self):
return self.image_list[self.index]
def get_length(self):
return len(self.image_list)
def update(self,fps=30):
step = 30 // fps
l = range(1,30,step)
if self.clock == 30:
self.clock = 1
else:
self.clock += 1
if self.clock in l:
# Increase index
self.index += 1
if self.index == len(self.image_list):
self.index = 0
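# Example (illustrative): update(10) computes step = 3, so the frame index
# advances whenever clock falls on 1, 4, 7, ..., 28 -- ten frame advances per
# 30-tick cycle -- and wraps back to frame 0 at the end of the sheet.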
```
{
"source": "jotheshjolly/Speech-Emotion-Recognition-",
"score": 2
}
#### File: jotheshjolly/Speech-Emotion-Recognition-/live_predictions.py
```python
import keras
import librosa
import numpy as np
import sys
import pathlib
import subprocess
import sounddevice as sd
from scipy.io.wavfile import write
working_dir_path = pathlib.Path().absolute()
if sys.platform.startswith('win32'):
MODEL_DIR_PATH = str(working_dir_path) + '\\model\\'
EXAMPLES_PATH = str(working_dir_path) + '\\examples\\'
else:
MODEL_DIR_PATH = str(working_dir_path) + '/model/'
EXAMPLES_PATH = str(working_dir_path) + '/examples/'
duration = 3.5
freq = 44400
print("recording started")
recording = sd.rec(int(duration * freq),samplerate = freq, channels = 2)
# Wait for the audio to complete
sd.wait()
write(EXAMPLES_PATH+"recording0.wav", freq, recording)
print("recording ended")
class LivePredictions:
"""
Main class of the application.
"""
def __init__(self, file):
self.file = file
self.path = MODEL_DIR_PATH + 'Emotion_Voice_Detection_Model.h5'
self.loaded_model = keras.models.load_model(self.path)
def make_predictions(self):
data, sampling_rate = librosa.load(self.file)
mfccs = np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)
#x = np.expand_dims(mfccs, axis=2)
x = np.expand_dims(mfccs, axis=1)
x = np.expand_dims(x, axis=0)
predictions = self.loaded_model.predict_classes(x)
print( "Prediction is", " ", self.convert_class_to_emotion(predictions))
return self.convert_class_to_emotion(predictions)
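    # Note (assumption about the Keras version in use): Sequential.predict_classes
    # was removed in newer Keras/TensorFlow releases; an equivalent for a
    # softmax-output model would be:
    #   predictions = np.argmax(self.loaded_model.predict(x), axis=-1)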
@staticmethod
def convert_class_to_emotion(pred):
"""
Method to convert the predictions (int) into human readable strings.
"""
label_conversion = {'0': 'neutral',
'1': 'calm',
'2': 'happy',
'3': 'sad',
'4': 'angry',
'5': 'fearful',
'6': 'disgust',
'7': 'surprised'}
for key, value in label_conversion.items():
if int(key) == pred:
label = value
return label
def play_sound(apath):
    # afplay is the macOS command-line audio player
    subprocess.call(["afplay", EXAMPLES_PATH + apath])
if __name__ == '__main__':
print("prediction started")
live_prediction = LivePredictions(file=EXAMPLES_PATH + 'recording0.wav')
live_prediction.loaded_model.summary()
p = live_prediction.make_predictions()
if p == 'calm':
print("playing calm")
play_sound("calm.mp3")
elif p == 'happy':
print("playing happy")
play_sound("happy.mp3")
elif p == 'sad':
print("playing sad")
play_sound("sad.wav")
elif p == 'angry':
print("playing angry")
play_sound("angry.mp3")
elif p == 'fearful':
print("playing fearful")
play_sound("fear.mp3")
elif p == 'disgust':
print("playing disgust")
play_sound("disgust.mp3")
elif p == 'surprised':
print("playing suprised")
play_sound("suprise.mp3")
```
{
"source": "jothetester/api_sit_example",
"score": 3
}
#### File: api_sit_example/lib/create_playlist.py
```python
import requests
import json
import test_data.secrets as secrets
class createPlaylist:
def __init__(self, context, path, playlist_name, playlist_description,playlist_permission):
self.context = context
self.endpoint = self.context.base_url + path
self.playlist_name = playlist_name
self.playlist_description = playlist_description
self.playlist_permission = playlist_permission
self.header={
"Content-Type": "application/json",
"Authorization": "Bearer {}".format(secrets.spotify_token)
}
def create_request_body(self):
request_body = json.dumps({
"name": self.playlist_name,
"description": self.playlist_description,
"public": self.playlist_permission
})
return request_body
def create_a_new_playlist(self):
data = self.create_request_body()
self.response = requests.post(
self.endpoint,
data=data,
headers=self.header
)
```
#### File: api_sit_example/lib/list_items_in_playlist.py
```python
import requests
import json
import test_data.secrets as secrets
class listTracks:
def __init__(self, context, path ):
self.context = context
self.endpoint = self.context.base_url + path
self.header={
"Content-Type": "application/json",
"Authorization": "Bearer {}".format(secrets.spotify_token)
}
def get_playlist_tracks(self):
self.response = requests.get(
self.endpoint,
headers=self.header
)
```
{
"source": "Jothiii/gg_sc_300",
"score": 4
}
#### File: Jothiii/gg_sc_300/sortgs.py
```python
import requests, os, datetime, argparse
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import pandas as pd
from time import sleep
import warnings
# Solve conflict between raw_input and input on Python 2 and Python 3
import sys
if sys.version[0]=="3": raw_input=input
# Default Parameters
KEYWORD = 'machine learning' # Default argument if command line is empty
NRESULTS = 300 # Default number of articles to fetch
CSVPATH = '.' # Current folder
SAVECSV = True
SORTBY = 'Citations'
PLOT_RESULTS = False
STARTYEAR = None
now = datetime.datetime.now()
ENDYEAR = now.year # Current year
# Websession Parameters
GSCHOLAR_URL = 'https://scholar.google.com/scholar?start={}&q={}&hl=en&as_sdt=0,5'
YEAR_RANGE = '' #&as_ylo={start_year}&as_yhi={end_year}'
#GSCHOLAR_URL_YEAR = GSCHOLAR_URL+YEAR_RANGE
STARTYEAR_URL = '&as_ylo={}'
ENDYEAR_URL = '&as_yhi={}'
ROBOT_KW=['unusual traffic from your computer network', 'not a robot']
def get_command_line_args():
# Command line arguments
parser = argparse.ArgumentParser(description='Arguments')
parser.add_argument('--kw', type=str, help="""Keyword to be searched. Use double quote followed by simple quote to search for an exact keyword. Example: "'exact keyword'" """)
parser.add_argument('--sortby', type=str, help='Column to be sorted by. Default is by the columns "Citations", i.e., it will be sorted by the number of citations. If you want to sort by citations per year, use --sortby "cit/year"')
    parser.add_argument('--nresults', type=int, help='Number of articles to search on Google Scholar. Default is 300. (careful with robot checking if the value is too high)')
parser.add_argument('--csvpath', type=str, help='Path to save the exported csv file. By default it is the current folder')
parser.add_argument('--notsavecsv', action='store_true', help='By default results are going to be exported to a csv file. Select this option to just print results but not store them')
    parser.add_argument('--plotresults', action='store_true', help='Use this flag in order to plot the results with the original rank in the x-axis and the number of citations in the y-axis. Default is False')
parser.add_argument('--startyear', type=int, help='Start year when searching. Default is None')
parser.add_argument('--endyear', type=int, help='End year when searching. Default is current year')
# Parse and read arguments and assign them to variables if exists
args, _ = parser.parse_known_args()
keyword = KEYWORD
if args.kw:
keyword = args.kw
nresults = NRESULTS
if args.nresults:
nresults = args.nresults
csvpath = CSVPATH
if args.csvpath:
csvpath = args.csvpath
save_csv = SAVECSV
if args.notsavecsv:
save_csv = False
sortby = SORTBY
if args.sortby:
sortby=args.sortby
plot_results = False
if args.plotresults:
plot_results = True
start_year = STARTYEAR
if args.startyear:
start_year=args.startyear
end_year = ENDYEAR
if args.endyear:
end_year=args.endyear
return keyword, nresults, save_csv, csvpath, sortby, plot_results, start_year, end_year
def get_citations(content):
out = 0
for char in range(0,len(content)):
if content[char:char+9] == 'Cited by ':
init = char+9
for end in range(init+1,init+6):
if content[end] == '<':
break
out = content[init:end]
return int(out)
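# Example (illustrative):
#   get_citations('<a href="...">Cited by 42</a>')  ->  42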
def get_year(content):
for char in range(0,len(content)):
if content[char] == '-':
out = content[char-5:char-1]
if not out.isdigit():
out = 0
return int(out)
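# Example (illustrative): in a gs_a line such as
#   'A Einstein - Annalen der Physik, 1905 - Wiley'
# the parser reads the four characters preceding the final ' - ' (here '1905'),
# so get_year returns 1905; non-digit candidates fall back to 0.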
def setup_driver():
try:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import StaleElementReferenceException
except Exception as e:
print(e)
print("Please install Selenium and chrome webdriver for manual checking of captchas")
print('Loading...')
chrome_options = Options()
chrome_options.add_argument("disable-infobars")
driver = webdriver.Chrome(chrome_options=chrome_options)
return driver
def get_author(content):
for char in range(0,len(content)):
if content[char] == '-':
out = content[2:char-1]
break
return out
def get_element(driver, xpath, attempts=5, _count=0):
'''Safe get_element method with multiple attempts'''
try:
element = driver.find_element_by_xpath(xpath)
return element
except Exception as e:
if _count<attempts:
sleep(1)
            return get_element(driver, xpath, attempts=attempts, _count=_count+1)
else:
print("Element not found")
def get_content_with_selenium(url):
if 'driver' not in globals():
global driver
driver = setup_driver()
driver.get(url)
# Get element from page
el = get_element(driver, "/html/body")
c = el.get_attribute('innerHTML')
if any(kw in el.text for kw in ROBOT_KW):
raw_input("Solve captcha manually and press enter here to continue...")
el = get_element(driver, "/html/body")
c = el.get_attribute('innerHTML')
return c.encode('utf-8')
def main():
# Get command line arguments
keyword, number_of_results, save_database, path, sortby_column, plot_results, start_year, end_year = get_command_line_args()
# Create main URL based on command line arguments
if start_year:
GSCHOLAR_MAIN_URL = GSCHOLAR_URL + STARTYEAR_URL.format(start_year)
else:
GSCHOLAR_MAIN_URL = GSCHOLAR_URL
if end_year != now.year:
GSCHOLAR_MAIN_URL = GSCHOLAR_MAIN_URL + ENDYEAR_URL.format(end_year)
# Start new session
session = requests.Session()
#headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
# Variables
links = []
title = []
citations = []
year = []
author = []
rank = [0]
# Get content from number_of_results URLs
for n in range(0, number_of_results, 10):
#if start_year is None:
url = GSCHOLAR_MAIN_URL.format(str(n), keyword.replace(' ','+'))
#else:
# url=GSCHOLAR_URL_YEAR.format(str(n), keyword.replace(' ','+'), start_year=start_year, end_year=end_year)
print("Loading next {} results".format(n+10))
page = session.get(url)#, headers=headers)
c = page.content
if any(kw in c.decode('ISO-8859-1') for kw in ROBOT_KW):
print("Robot checking detected, handling with selenium (if installed)")
try:
c = get_content_with_selenium(url)
except Exception as e:
print("No success. The following error was raised:")
print(e)
# Create parser
soup = BeautifulSoup(c, 'html.parser')
# Get stuff
mydivs = soup.findAll("div", { "class" : "gs_r" })
for div in mydivs:
try:
links.append(div.find('h3').find('a').get('href'))
except: # catch *all* exceptions
links.append('Look manually at: '+url)
try:
title.append(div.find('h3').find('a').text)
except:
title.append('Could not catch title')
try:
citations.append(get_citations(str(div.format_string)))
except:
warnings.warn("Number of citations not found for {}. Appending 0".format(title[-1]))
citations.append(0)
try:
year.append(get_year(div.find('div',{'class' : 'gs_a'}).text))
except:
warnings.warn("Year not found for {}, appending 0".format(title[-1]))
year.append(0)
try:
author.append(get_author(div.find('div',{'class' : 'gs_a'}).text))
except:
author.append("Author not found")
rank.append(rank[-1]+1)
# Delay
sleep(0.5)
# Create a dataset and sort by the number of citations
data = pd.DataFrame(list(zip(author, title, citations, year, links)), index = rank[1:],
columns=['Author', 'Title', 'Citations', 'Year', 'Source'])
data.index.name = 'Rank'
try:
data_ranked = data.sort_values(by=sortby_column, ascending=False)
except Exception as e:
print('Column name to be sorted not found. Sorting by the number of citations...')
data_ranked = data.sort_values(by='Citations', ascending=False)
print(e)
# Add columns with number of citations per year
data_ranked['cit/year'] = data_ranked['Citations']/(end_year + 1 - data_ranked['Year'])
data_ranked['cit/year']=data_ranked['cit/year'].round(0).astype(int)
print(data_ranked)
# Plot by citation number
if plot_results:
plt.plot(rank[1:],citations,'*')
plt.ylabel('Number of Citations')
plt.xlabel('Rank of the keyword on Google Scholar')
plt.title('Keyword: '+keyword)
plt.show()
# Save results
if save_database:
data_ranked.to_csv(os.path.join(path,keyword.replace(' ','_')+'.csv'), encoding='utf-8') # Change the path
if __name__ == '__main__':
main()
```
{
"source": "Jothin-kumar/ALPHA-AI",
"score": 3
}
#### File: Jothin-kumar/ALPHA-AI/alpha.py
```python
import time
import pyautogui
import pyttsx3
import datetime
import requests
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
import pywhatkit
import random
from requests import get
import pyjokes
import PyPDF2
import gui
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# print(voices[0].id)
engine.setProperty('voice', voices[0].id)
def speak(audio):
gui.speak(audio)
engine.say(audio)
engine.runAndWait()
# WISH ME
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning Sir!")
elif hour>=12 and hour<18:
speak("Good Afternoon Sir!")
else:
speak("Good Evening Sir!")
speak("Acro Here,How may i help you?")
def takeCommand():
#It takes microphone input from the user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception as e:
# print(e)
print("Say that again please...")
return "None"
return query
# SEND EMAIL TO
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('your email', 'password')
server.sendmail('email to whom send the message', to, content)
server.close()
# NEWS
def news():
main_url = 'url + api'
main_page = requests.get(main_url).json()
articles = main_page["articles"]
head = []
day=["first","second","third","fourth","fifth","sixth","seventh","eighth","ninth","tenth"]
for ar in articles:
head.append(ar["title"])
    for i in range(min(len(day), len(head))):
        speak(f"today's {day[i]} news is: {head[i]}")
# PDF READER
def pdf_reader():
book = open('Introduction to software engineering.pdf','rb')
pdfReader = PyPDF2.PdfFileReader(book)
pages = pdfReader.numPages
speak(f"Total number of pages in this book {pages}")
speak("Sir please enter the page number i have to read")
pg = int(input("Please enter the page number:"))
page = pdfReader.getPage(pg)
text = page.extractText()
speak(text)
wishMe()
def run_alpha():
query = takeCommand()
print(query)
# WIKIPEDIA
if 'wikipedia' in query:
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=1)
speak("According to Wikipedia")
print(results)
speak(results)
# OPEN YOUTUBE
elif 'open youtube' in query:
speak('Okay!')
webbrowser.open("youtube.com")
# OPEN GOOGLE
elif 'open google' in query:
speak('Okay!')
webbrowser.open("google.com")
# OPEN GITHUB
elif 'open github' in query:
webbrowser.open("https://www.github.com")
speak("opening github")
# OPEN INSTAGRAM
elif 'open instagram' in query:
webbrowser.open("https://www.instagram.com")
speak("opening instagram")
# SEARCH SOMETHING ON GOOGLE
elif 'search on google' in query:
speak("What would you like to search on google?")
cm = takeCommand().lower()
webbrowser.open(f"{cm}")
# PLAY MUSIC FROM DIR
elif 'play music' in query:
speak('Playing Music!')
music_dir = 'D:\\keksh'
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[0]))
# WHATS THE TIME
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"The Time is {strTime}")
# OPEN VISUAL STUDIO CODE
elif 'open code' in query:
speak('Opening Visual Studio Code!')
codePath = "C:\\Users\\hp\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
# OPEN NOTEPAD
elif 'open notepad' in query:
speak('Okay!')
nPath = "C:\\Program Files\\Notepad++\\notepad++.exe"
os.startfile(nPath)
# OPEN COMMAND PROMPT
elif 'open command prompt' in query or 'open CMD' in query:
speak('Okay!')
os.system('start cmd')
# SEND EMAIL
elif 'email to sachin' in query:
try:
speak("What should i say?")
content = takeCommand()
to = "<EMAIL>"
sendEmail(to, content)
speak("Email has been sent!")
except Exception as e:
print(e)
speak("Sorry,I am not able to send this email at the moment")
# SEND WHATSAPP MESSAGE
elif 'send message' in query:
speak('Okay! Sending Message...')
pywhatkit.sendwhatmsg('number', 'message here', "time here")
# PLAY MUSIC FROM YOUTUBE
elif 'play' in query:
song = query.replace('play', '')
speak('playing ' + song)
pywhatkit.playonyt(song)
# SIMPLE TALKS
elif "what\'s up" in query or 'how are you' in query:
stMsgs = ['Just doing my thing!', 'I am fine!', 'Nice!', 'I am nice and full of energy','i am okey ! How are you']
ans_q = random.choice(stMsgs)
speak(ans_q)
ans_take_from_user_how_are_you = takeCommand()
if 'fine' in ans_take_from_user_how_are_you or 'happy' in ans_take_from_user_how_are_you or 'okey' in ans_take_from_user_how_are_you:
speak('okey..')
elif 'not' in ans_take_from_user_how_are_you or 'sad' in ans_take_from_user_how_are_you or 'upset' in ans_take_from_user_how_are_you:
speak('oh sorry..')
elif 'make you' in query or 'created you' in query or 'develop you' in query:
ans_m = " For your information <NAME> Created me ! I give Lot of Thannks to Him "
print(ans_m)
speak(ans_m)
elif "who are you" in query or "about you" in query or "your details" in query:
about = "I am Acro an A I based computer program but i can help you lot like a your close friend ! i promise you ! Simple try me to give simple command ! like playing music or video from your directory i also play video and song from web or online ! i can also entain you i so think you Understand me ! ok Lets Start "
print(about)
speak(about)
elif "hello" in query or "hello Acro" in query:
hel = "Hello Sir ! How May i Help you.."
print(hel)
speak(hel)
elif "your name" in query or "sweat name" in query:
na_me = "Thanks for Asking my name my self ! Acro"
print(na_me)
speak(na_me)
elif "you feeling" in query:
print("feeling Very sweet after meeting with you")
speak("feeling Very sweet after meeting with you")
#IP ADDRESS
elif "ip address" in query:
ip = get('https://api.ipify.org').text
speak(f"Your IP address is {ip}")
# JOKE
elif "Tell me a joke" in query:
joke = pyjokes.get_joke()
speak(joke)
# SET ALARM
elif "set alarm" in query:
nn = int(datetime.datetime.now().hour)
if nn == 22:
music_dir = 'D:\\keksh'
songs = os.listdir(music_dir)
os.startfile(os.path.join(music_dir, songs[0]))
# SHUTDOWN
elif "shut down the system" in query:
os.system("shutdown /s /t 5")
# RESTART
elif "restart the system" in query:
os.system("shutdown /r /t 5")
# SLEEP
elif "sleep the system" in query:
os.system("rundll32.exe powrprof.dll,SetSuspendState 0,1,0")
# NEWS
elif "tell me news" in query:
speak("Please wait Sir,fetching the latest news")
news()
# FIND LOCATION
elif "where i am" in query or "where are we" in query:
speak("wait sir let me check")
try:
ipAdd = requests.get('https://api.ipify.org').text
print(ipAdd)
url = 'https://get.geojs.io/v1/ip/geo/'+ipAdd+'.json'
geo_requests = requests.get(url)
geo_data = geo_requests.json()
# print(geo_data)
city = geo_data['city']
state = geo_data['state']
country = geo_data['country']
speak(f"Sir im not sure, but we are in {city} city {state} state of {country} country")
except Exception as e:
speak("Sorry Sir, Due to network issue i am not able to find where we are.")
pass
# TAKE SCREENSHOT
elif "take screenshot" in query:
speak("Sir, Please tell me the name for the screenshot file")
name = takeCommand().lower()
speak("Taking Screenshot...!")
time.sleep(2)
img = pyautogui.screenshot()
img.save(f"{name}.png")
speak("ScreenShot Saved...!")
# PDF READER
elif "read pdf" in query:
pdf_reader()
else:
url = "http://api.brainshop.ai/get?bid=157984&key=<KEY>2KYs4&uid=[uid]&msg=[{}]".format(query)
response = requests.get(url).json()['cnt']
print(response)
speak(response)
gui.set_speak_command(run_alpha)
gui.mainloop()
```
{
"source": "Jothin-kumar/chat-app",
"score": 2
}
#### File: scripts/client/gui.py
```python
import tkinter as tk
from tkinter.messagebox import showerror
root = tk.Tk()
root.title("Chat application")
root.minsize(width=1000, height=500)
main = root.mainloop
servers_and_channels = tk.Frame(root, bg='black')
servers = tk.Frame(servers_and_channels, width=25, bg='gray15')
def set_servers(servers_: list):
for server in servers_:
button = tk.Button(servers, text=server.name, command=server.set)
button.pack(side=tk.TOP, anchor=tk.W, padx=20, pady=10)
server.configure(button)
def get_server_by_name_command(name):
# Command to get a server object by name. can be set using the set_get_server_by_name_command method.
pass
def set_get_server_by_name_command(command):
global get_server_by_name_command
get_server_by_name_command = command
profile_button = tk.Button(servers, text="Profile", bg='gray15', fg='white', font=('Helvetica', 15))
profile_button.pack(side=tk.TOP, fill=tk.X)
servers.pack(side=tk.LEFT, fill=tk.Y)
channels_frame = tk.Frame(servers_and_channels, width=30, bg='gray17')
channels = []
def set_channels(channel_names_and_command: list):
global channels
for channel in channels:
channel.pack_forget()
channels = []
for channel in channel_names_and_command:
button = tk.Button(channels_frame, text=channel.name, command=channel.set)
button.pack(side=tk.TOP, anchor=tk.W, padx=20, pady=10, fill=tk.X)
channels.append(button)
server_name_label = tk.Label(channels_frame, text="Server", bg='gray15', fg='white', font=('Helvetica', 25), width=15)
def set_server_name(text):
server_name_label.config(text=text)
def get_server_name():
return server_name_label.cget("text")
server_name_label.pack(side=tk.TOP, fill=tk.X)
channels_frame.pack(side=tk.LEFT, fill=tk.Y)
servers_and_channels.pack(side=tk.LEFT, fill=tk.Y)
message_area = tk.Frame(root)
channel_name_label = tk.Label(message_area, bg='gray20', fg='snow', text="Channel", font=("Helvetica", 25))
def set_channel_name(text):
channel_name_label.config(text=text)
def get_channel_name():
return channel_name_label.cget("text")
channel_name_label.pack(side=tk.TOP, fill=tk.X)
chats_frame = tk.Frame(message_area, bg='gray25')
messages_frame = tk.Frame(chats_frame, bg='gray25')
message_labels = []
def incoming_message(message):
message_label = tk.Label(messages_frame, bg='gray20', fg='snow', text=message, font=("Helvetica", 15))
message_label.pack(side=tk.TOP, anchor=tk.W, padx=20, pady=10)
message_labels.append(message_label)
def outgoing_message(message):
message_label = tk.Label(messages_frame, bg='gray20', fg='snow', text=message, font=("Helvetica", 15))
message_label.pack(side=tk.TOP, anchor=tk.E, padx=20, pady=10)
message_labels.append(message_label)
def clear_messages():
for message_label in message_labels:
message_label.pack_forget()
message_labels.clear()
messages_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
send_message_frame = tk.Frame(chats_frame, bg='gray25')
message_input = tk.Entry(send_message_frame, bg='gray30', fg='white', font=("Helvetica", 15))
def on_enter(event):
send_message()
message_input.bind('<Return>', on_enter)
message_input.pack(side=tk.LEFT, fill=tk.X, expand=True)
def send_message():
if get_server_by_name_command(get_server_name()).button['bg'] == 'green':
msg = message_input.get().strip().strip('\n')
if msg:
outgoing_message(msg)
message_input.delete(0, tk.END)
send_message_command(msg)
else:
showerror(
title='Not connected',
message='Your message cannot be sent because, you are not connected.'
'Please connect to the internet and try again.'
)
def send_message_command(msg):
# Command to send a message to the server. can be set using the set_send_message_command method.
pass
def set_send_message_command(command):
global send_message_command
send_message_command = command
send_message_button = tk.Button(send_message_frame, text="Send", bg='gray20', fg='white', font=("Helvetica", 15),
command=send_message)
send_message_button.pack(side=tk.LEFT, fill=tk.X, padx=5)
send_message_frame.pack(side=tk.BOTTOM, fill=tk.X, padx=10, pady=15)
chats_frame.pack(fill=tk.BOTH, expand=True)
message_area.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
```
#### File: scripts/client/main.py
```python
import tkinter
import socket_handler
import message_parser
import messages
import gui
class Channel:
def __init__(self, name):
self.name = name
def set(self):
gui.set_channel_name(self.name)
gui.clear_messages()
for message in messages.get_messages():
if message.server == gui.get_server_name() and message.channel == self.name:
if message.type_ == "incoming":
gui.incoming_message(message.msg)
elif message.type_ == "outgoing":
gui.outgoing_message(message.msg)
class Server:
def __init__(self, host, port, name, channels: list):
self.host = host
self.port = port
self.name = name
def on_new_message(channel, message):
messages.Message(server=self.name, channel=channel, message=message, type_="incoming")
if self.name == gui.get_server_name() and channel == gui.get_channel_name():
gui.incoming_message(message)
self.socket = socket_handler.ServerConnection(self.host, self.port, on_new_message)
self.channels = channels
self.send = self.socket.send
self.button = tkinter.Button
def set(self):
gui.set_server_name(self.name)
gui.set_channels(self.channels)
self.channels[0].set()
def configure(self, button):
self.socket.configure(button)
self.button = button
servers = [Server("localhost", 1265, "Server 1", [Channel("Channel 1"), Channel("Channel 2")])]
gui.set_servers(servers)
servers[0].set()
servers[0].channels[0].set()
def get_current_server_by_name(name):
for server in servers:
if server.name == name:
return server
return None
def send_msg(message):
server = get_current_server_by_name(gui.get_server_name())
channel = gui.get_channel_name()
server.send(message_parser.encode_message(channel=channel, message=message))
messages.Message(server=server.name, channel=channel, message=message, type_="outgoing")
gui.set_send_message_command(send_msg)
gui.set_get_server_by_name_command(get_current_server_by_name)
gui.main()
```
#### File: scripts/client/socket_handler.py
```python
import socket
import threading
import message_parser
from datetime import datetime
import time
import tkinter
class ServerConnection:
def __init__(self, host, port, on_new_message_command):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = port
self.button = tkinter.Button
self.on_new_message_command = on_new_message_command
self.last_connection_verified = datetime.now().timestamp() - 15
threading.Thread(target=self.connect_forever).start()
threading.Thread(target=self.check_for_new_messages).start()
threading.Thread(target=self.verify_connection_forever).start()
def configure(self, button):
self.button = button
self.button['bg'] = 'yellow'
def connect_forever(self):
while True:
try:
if self.button['bg'] == 'yellow':
self.socket.connect((self.host, self.port))
self.button['bg'] = 'green'
except ConnectionRefusedError:
pass
except OSError:
pass
except TypeError:
pass
except RuntimeError:
break
time.sleep(10)
def send(self, msg):
message_sent = False
while not message_sent:
try:
self.socket.send(msg.encode())
message_sent = True
except BrokenPipeError:
self.socket.close()
self.button['bg'] = 'yellow'
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.host, self.port))
self.button['bg'] = 'green'
def receive(self):
return self.socket.recv(1024).decode()
def on_new_message(self, message):
if message == 'you are connected':
self.last_connection_verified = datetime.now().timestamp()
else:
channel, incoming_message = message_parser.decode_message(message)
self.on_new_message_command(channel, incoming_message)
def verify_connection_forever(self):
while True:
try:
if datetime.now().timestamp() - self.last_connection_verified > 9:
self.button['bg'] = 'yellow'
try:
self.socket.connect((self.host, self.port))
self.button['bg'] = 'green'
except ConnectionRefusedError:
pass
except OSError:
self.button['bg'] = 'green'
else:
self.button['bg'] = 'green'
time.sleep(10)
except RuntimeError:
break
except:
pass
def check_for_new_messages(self):
while True:
try:
msg = self.receive()
if msg == '\n':
continue
self.on_new_message(msg)
except RuntimeError:
break
except:
pass
```
{
"source": "Jothin-kumar/Geometry_app",
"score": 3
}
#### File: shapes/_angles/__init__.py
```python
import sys
# Import global_variables module
sys.path.append('../../')
import global_variables
class Angle:
def __init__(self, line1: global_variables.get_value('Line'), line2: global_variables.get_value('Line')):
for angle_ in global_variables.get_value('angles'):
if angle_.lines in ([line1, line2], [line2, line1]):
raise ValueError('Angle already exists')
self.lines = [line1, line2]
self.vertex = None
for point1 in line1.points:
for point2 in line2.points:
if point1 == point2:
self.vertex = point1
self.points = []
for line in self.lines:
for point in line.points:
if point not in self.points and point != self.vertex:
self.points.append(point)
self.name = f'{self.points[0].name}{self.vertex.name}{self.points[1].name}'
def highlight(self):
for angle_ in global_variables.get_value('angles'):
angle_.unhighlight()
for line_ in global_variables.get_value('lines'):
line_.unhighlight()
for line in self.lines:
line.highlight(unhighlighted_others=True)
def unhighlight(self):
for line in global_variables.get_value('lines'):
line.unhighlight()
def angle(line1: global_variables.get_value('Line'), line2: global_variables.get_value('Line')):
angle_ = Angle(line1, line2)
global_variables.get_value('angles').append(angle_)
return angle_
def refresh_angles():
global_variables.set_value('angles', [])
for line1 in global_variables.get_value('lines'):
for line2 in global_variables.get_value('lines'):
if not line1 == line2:
if line1.point1 == line2.point1 or line1.point1 == line2.point2 or line1.point2 == line2.point1 or line1.point2 == line2.point2:
try:
angle(line1, line2)
except ValueError:
pass
def get_angle_by_name(name: str):
for angle_ in global_variables.get_value('angles'):
if angle_.name == name:
return angle_
```
#### File: shapes/_line/_intersecting_lines.py
```python
import sys
# Import global_variables module
sys.path.append('../../')
import global_variables
def refresh_intersecting_lines(create_text_command, delete_line):
global_variables.set_value('intersecting_lines_and_intersection_point', {})
horizontal_lines = []
vertical_lines = []
for line in global_variables.get_value('lines'):
if line.point1.y == line.point2.y:
horizontal_lines.append(line)
elif line.point1.x == line.point2.x:
vertical_lines.append(line)
for horizontal_line in horizontal_lines:
for vertical_line in vertical_lines:
global_variables.get_value('intersecting_lines_and_intersection_point')[(vertical_line.point1.x,
horizontal_line.point1.y)] = {
'horizontal line': horizontal_line, 'vertical line': vertical_line
}
try:
global_variables.get_value('point')(vertical_line.point1.x, horizontal_line.point1.y,
create_text_command, delete_line)
except ValueError:
pass
```
{
"source": "Jothin-kumar/time-widget",
"score": 3
}
#### File: windows/main_widget/gui.py
```python
import tkinter as tk
left_top = 'left-top'
right_bottom = 'right-bottom'
def get_location_on_screen():
with open('location_on_screen.txt') as loc:
return loc.read().strip('\n')
def set_location_on_scren(location):
with open('location_on_screen.txt', 'w') as loc:
loc.write(location)
class MinWidget:
def __init__(self):
self.root = tk.Tk()
self.root.overrideredirect(True)
self.root.attributes('-topmost', True)
self.root.resizable(False, False)
self.time_viewer = tk.Label(master=self.root, text='', font=("Arial", 25))
self.timezone_viewer = tk.Label(master=self.root, text='', font=("Arial", 15))
self.time_viewer.pack()
self.timezone_viewer.pack()
self.mainloop = self.root.mainloop
self.destroy = self.root.destroy
def on_right_click(event):
location_on_screen = get_location_on_screen()
if location_on_screen == left_top:
set_location_on_scren(right_bottom)
elif location_on_screen == right_bottom:
set_location_on_scren(left_top)
self.refresh_size_and_location()
        self.time_viewer.bind('<Button-3>', on_right_click)
self.refresh_size_and_location()
def refresh_size_and_location(self):
location_on_screen = get_location_on_screen()
self.root.update()
if location_on_screen == left_top:
self.root.geometry('+0+0')
elif location_on_screen == right_bottom:
            # winfo_geometry() returns a string like '200x80+0+0' (WxH+X+Y)
            w = int(self.root.winfo_geometry().split('+')[0].split('x')[0])
            h = int(self.root.winfo_geometry().split('+')[0].split('x')[1])
self.root.geometry(
'+' + str(self.root.winfo_screenwidth() - w) + '+' + str(self.root.winfo_screenheight() - h)
)
def set_time(self, time: str):
self.time_viewer['text'] = time
self.refresh_size_and_location()
def set_timezone(self, timezone: str):
self.timezone_viewer['text'] = timezone
self.refresh_size_and_location()
def configure_switch_to_max(self, command):
self.root.bind('<Button-1>', command)
class MaxWidget:
def __init__(self):
self.root = tk.Tk()
self.root.overrideredirect(True)
self.root.attributes('-topmost', True)
self.root.resizable(False, False)
self.time_viewer = tk.Label(master=self.root, text='', font=("Arial", 25))
self.mainloop = self.root.mainloop
self.destroy = self.root.destroy
self.countries_list_box = tk.Listbox(master=self.root, height=25)
self.time_zone_listbox = tk.Listbox(master=self.root, height=25)
self.countries_list_box.grid(row=1, column=0)
self.time_zone_listbox.grid(row=1, column=1)
self.time_viewer.grid(row=0, column=0)
def on_right_click(event):
location_on_screen = get_location_on_screen()
if location_on_screen == left_top:
set_location_on_scren(right_bottom)
elif location_on_screen == right_bottom:
set_location_on_scren(left_top)
self.refresh_size_and_location()
self.root.bind('<Button-3>', on_right_click)
self.refresh_size_and_location()
def refresh_size_and_location(self):
location_on_screen = get_location_on_screen()
self.root.update()
if location_on_screen == left_top:
self.root.geometry('+0+0')
def to_right_bottom():
set_location_on_scren(right_bottom)
elif location_on_screen == right_bottom:
w = int(self.root.winfo_geometry().split('+')[0].split('x')[0])
h = int(self.root.winfo_geometry().split('+')[0].split('x')[1])
self.root.geometry(
'+' + str(self.root.winfo_screenwidth() - w) + '+' + str(self.root.winfo_screenheight() - h)
)
def to_left_top():
set_location_on_scren(left_top)
def set_countries(self, countries: list):
self.countries_list_box.delete(0, tk.END)
for country in countries:
self.countries_list_box.insert(tk.END, f'{country.full_name} - {country.iso3166_code}')
self.refresh_size_and_location()
def set_timezones(self, timezones: list):
self.time_zone_listbox.delete(0, tk.END)
for timezone in timezones:
self.time_zone_listbox.insert(tk.END, timezone)
self.refresh_size_and_location()
def set_on_country_select(self, command):
def onselect(evt):
try:
w = evt.widget
index = int(w.curselection()[0])
value = w.get(index)
command(value[-2:])
except:
pass
self.countries_list_box.bind('<<ListboxSelect>>', onselect)
def set_on_timezone_select(self, command):
def onselect(evt):
try:
w = evt.widget
index = int(w.curselection()[0])
value = w.get(index)
command(value)
except:
pass
self.time_zone_listbox.bind('<<ListboxSelect>>', onselect)
def set_time(self, time: str):
self.time_viewer['text'] = time
self.refresh_size_and_location()
def configure_switch_to_min(self, command):
self.time_viewer.bind('<Button-1>', command)
```
{
"source": "JOThurgood/kalman-filter-course-assignments",
"score": 3
}
#### File: JOThurgood/kalman-filter-course-assignments/assignment1.py
```python
import numpy as np
from sim.sim1d import sim_run
# Simulator options.
options = {}
options['FIG_SIZE'] = [8,8]
options['CONSTANT_SPEED'] = False
# weight used in measure_and_update
# should be > 0 (if == 0, v stays at v_0 = 0 forever)
# if == 1, self.v is always set to the latest measurement (so it responds to changes in v quickly)
# if small, it responds slowly to changes in speed
# the effect on response time trades off against the effect on error: putting 100% weight on new
# measurements means you can't reduce the error by incorporating old measurements.
weight = 0.5
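# Put differently (illustrative): the update in measure_and_update below is an
# exponential moving average of the measured velocities,
#   v_k = (1 - weight) * v_{k-1} + weight * measured_v_k
# so older measurements decay geometrically with factor (1 - weight).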
class KalmanFilterToy:
def __init__(self):
self.v = 0
self.prev_x = 0
self.prev_t = 0
def predict(self,t):
# return "prediction" which is the current x state
dt = t - self.prev_t
prediction = self.prev_x + self.v * dt
return prediction
def measure_and_update(self,x,t):
dt = t - self.prev_t
dx = x - self.prev_x
measured_v = dx / dt
self.v += weight * (measured_v - self.v)
self.prev_x = x
self.prev_t = t
return
sim_run(options,KalmanFilterToy)
```
{
"source": "JOThurgood/SimpleCFD",
"score": 3
}
#### File: CCAPS/python_plotting/ghia_re1e3.py
```python
import numpy as np
import matplotlib.pyplot as plt
import os
def getfilenameindex(filename):
filenum = 0
while (os.path.exists(filename+"{:04}".format(filenum)+".png")):
filenum+=1
filename = "{}{:04}.png".format(filename,filenum)
return filename
# Load the data
time = np.fromfile('time.dat', dtype = np.float64)
xc = np.fromfile('xc.dat', dtype = np.float64)
yc = np.fromfile('yc.dat', dtype = np.float64)
u = np.fromfile('u.dat', dtype = np.float64)
v = np.fromfile('v.dat', dtype = np.float64)
# Reshape arrays
u = u.reshape(yc.size, xc.size)
v = v.reshape(yc.size, xc.size)
cen = yc.size // 2
plt.plot(yc,(u[:,cen]+u[:,cen+1])/2.)
y_ghia = np.array([
0.0000,
0.0547,
0.0625,
0.0703,
0.1016,
0.1719,
0.2813,
0.4531,
0.5,
0.6172,
0.7344,
0.8516,
0.9531,
0.9609,
0.9688,
0.9766,
1.])
u_center_re1000 = np.array([
1.0,
0.65928,
0.57492,
0.51117,
0.46604,
0.33304,
0.18719,
0.05702,
-0.06080,
-0.10648,
-0.27805,
-0.38289,
-0.29730,
-0.22220,
-0.20196,
-0.18109,
0.00000 ])
u_center_re1000 = np.flip(u_center_re1000,0)
plt.scatter(y_ghia,u_center_re1000)
plt.scatter(yc,(u[:,cen]+u[:,cen+1])/2.,color='red')
plt.title('u on central vertical axis at t = {:.3f}'.format(float(time)))
plt.savefig(getfilenameindex("output/ghia_u_cent_"))
```
{
"source": "joticajulian/koinos-types",
"score": 3
}
#### File: programs/generate-ids/generate_ids.py
```python
import hashlib
import sys
def do_hash(name, prefix_value):
h = int(hashlib.sha256(name.encode("utf-8")).hexdigest(), 16)
h_top_28 = h >> (256 - 28)
return hex(h_top_28 | (prefix_value << 28))
# Thunk ID is 8 followed by top 28 bits of H("thunk_id::"+name)
def get_thunk_id(name):
return do_hash("thunk_id::"+name, 8)
# System call ID is 9 followed by top 28 bits of H("system_call_id::"+name)
def get_system_call_id(name):
return do_hash("system_call_id::"+name, 9)
def main():
if len(sys.argv) != 2:
print("Usage: generate_ids.py <thunk_name>")
return
name = sys.argv[1]
thunk_id = get_thunk_id(name)
system_call_id = get_system_call_id(name)
print(f"Thunk: {name}")
print(f"Thunk ID: {thunk_id}")
print(f"System Call ID: {system_call_id}")
return
if __name__ == "__main__":
main()
```
#### File: koinos-types/koinos_codegen/codegen.py
```python
import pluginbase
import argparse
import json
import os
import sys
def write_if_different(dest_filename, content):
if os.path.exists(dest_filename):
with open(dest_filename, "r") as f:
current_content = f.read()
if current_content == content:
return
with open(dest_filename, "w") as f:
f.write(content)
plugin_base = pluginbase.PluginBase(package="koinos_codegen.plugins")
# TODO: search path for builtin plugins?
class Application:
def __init__(self, name="koinos_codegen", plugin_searchpath=[]):
self.name = name
self.targets = {}
self.plugin_source = plugin_base.make_plugin_source(
searchpath=plugin_searchpath,
identifier=self.name,
)
for plugin_name in self.plugin_source.list_plugins():
plugin = self.plugin_source.load_plugin(plugin_name)
plugin.setup(self)
def register_target(self, name, target):
self.targets[name] = target
class ApplicationError(Exception):
pass
def main(argv):
argparser = argparse.ArgumentParser(description="Analyze files")
argparser.add_argument("schema", metavar="FILE", nargs="*", type=str, help="Schema file(s)")
argparser.add_argument("-t", "--target", metavar="LANG", default="python", type=str, help="Target language (default=python)")
argparser.add_argument("--target-path", metavar="DIRS", action="append", type=str, help="Search path for language target definitions")
argparser.add_argument("--list-targets", action="store_true", help="List available language definitions and exit")
argparser.add_argument("-p", "--package", metavar="PKG", default="", type=str, help="Output package name")
argparser.add_argument("-o", "--output", metavar="DIR", default="build", type=str, help="Output directory")
args = argparser.parse_args(argv)
if args.package == "":
raise ApplicationError("Required: package")
package_dir = os.path.join(args.output, args.package)
# if os.path.exists(package_dir):
# raise ApplicationError("Package dir must not exist")
os.makedirs(package_dir, exist_ok=True)
target_path = args.target_path
if target_path is None:
target_path = []
app = Application(plugin_searchpath=target_path)
try:
if args.list_targets:
for plugin_name in app.targets:
print(plugin_name)
return 0
if len(args.schema) != 1:
raise RuntimeError("Must specify exactly one input file")
generate_target = app.targets[args.target]
with open(args.schema[0], "r") as f:
schema = json.load(f)
generated = generate_target(schema)
for filename, content in generated["files"].items():
target_filename = os.path.join(package_dir, filename)
target_dir = os.path.dirname(target_filename)
os.makedirs(target_dir, exist_ok=True)
write_if_different(target_filename, content)
finally:
pass
if __name__ == "__main__":
result = main(sys.argv[1:])
sys.exit(result)
```
#### File: koinos-types/koinos_reflect/schema.py
```python
from . import parser
from . import baseparser
from dataclasses_json import dataclass_json
from dataclasses import dataclass, field
from typing import List, Tuple, Union
from collections import OrderedDict
from .parser import *
class AnalyzeError(Exception):
pass
class TreeListener:
def process_toplevel(self, node): yield
def process_typedef(self, node): yield
def process_struct(self, node): yield
def process_ns(self, node): yield
def process_enum(self, node): yield
def process_basetype(self, node): yield
def process_field(self, node): yield
def process_typeref(self, node): yield
def process_intliteral(self, node): yield
def process_enum_entry(self, node): yield
def walk(listener, node):
if isinstance(node, parser.Toplevel):
f = listener.process_toplevel
subnodes = node.decls
elif isinstance(node, parser.Typedef):
f = listener.process_typedef
subnodes = [node.tref]
elif isinstance(node, parser.Struct):
f = listener.process_struct
subnodes = node.fields
elif isinstance(node, parser.Namespace):
f = listener.process_ns
subnodes = node.decls
elif isinstance(node, parser.EnumClass):
f = listener.process_enum
subnodes = [node.tref] + node.entries
elif isinstance(node, parser.BaseType):
f = listener.process_basetype
subnodes = []
elif isinstance(node, parser.Field):
f = listener.process_field
subnodes = [node.tref]
elif isinstance(node, parser.Typeref):
f = listener.process_typeref
subnodes = node.targs
if subnodes is None:
subnodes = []
elif isinstance(node, parser.IntLiteral):
f = listener.process_intliteral
subnodes = []
elif isinstance(node, parser.EnumEntry):
f = listener.process_enum_entry
subnodes = []
else:
raise AnalyzeError("Unknown node type")
# Pre-node: Run until yield statement
it = f(node)
next(it)
for sn in subnodes:
walk(listener, sn)
# Post-node: Continue, expect StopIteration
try:
next(it)
raise AnalyzeError("Multiple yields while processing node")
except StopIteration:
pass
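# Minimal listener sketch (illustrative): pre-visit work goes before the yield,
# post-visit work after it; walk() drives the generator one step at a time.
#   class StructCounter(TreeListener):
#       def __init__(self):
#           self.count = 0
#       def process_struct(self, node):
#           self.count += 1   # pre-visit
#           yield             # subtree is walked here
#           # post-visit work would go here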
class NamespaceStacker(TreeListener):
def __init__(self):
super().__init__()
self.namespace_stack = []
def process_ns(self, node):
self.namespace_stack.append(node.name)
yield
self.namespace_stack.pop()
class TypemapBuilder(NamespaceStacker):
def __init__(self):
super().__init__()
self.typemap = OrderedDict()
def add_to_typemap(self, name, node):
name = tuple(name)
if name in self.typemap:
raise AnalyzeError("Multiple definition of {}".format(name))
self.typemap[name] = node
def process_basetype(self, node):
self.add_to_typemap(node.name, node)
yield
def process_typedef(self, node):
self.add_to_typemap(self.namespace_stack + [node.name], node)
yield
def process_struct(self, node):
self.add_to_typemap(self.namespace_stack + [node.name], node)
yield
def process_enum(self, node):
self.add_to_typemap(self.namespace_stack + [node.name], node)
yield
class ReferenceChecker(NamespaceStacker):
def __init__(self, typemap):
super().__init__()
self.typemap = typemap
def search(self, name):
# Search for a qualified name in each enclosing namespace
ns = list(self.namespace_stack)
while True:
fqn = tuple(ns + name)
if fqn in self.typemap:
return fqn
if len(ns) == 0:
return None
ns.pop()
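    # Example (illustrative): with namespace_stack == ['koinos', 'chain'] and
    # name == ['digest'], the lookup tries ('koinos', 'chain', 'digest'),
    # then ('koinos', 'digest'), then ('digest',), returning the first match.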
def process_typeref(self, node):
fqn = self.search(node.name)
if fqn is None:
raise AnalyzeError("Unknown type {}", "::".join(node.name))
node.name = fqn
yield
# Immediate dependencies of a node
class FindDepsWalker(TreeListener):
def __init__(self, typemap=None):
super().__init__()
self.typemap = typemap
self.dep_set = set()
self.dep_list = []
def process_typeref(self, node):
name = tuple(node.name)
if name not in self.dep_set:
self.dep_set.add(name)
self.dep_list.append(name)
yield
def find_immediate_deps(typemap, name):
finder = FindDepsWalker(typemap)
walk(finder, typemap[name])
return finder.dep_list
class TopologicalSorter:
def __init__(self, deps, max_depth):
self.deps = deps
self.max_depth = max_depth
self.emitted_set = set()
self.emitted_list = list()
def process_item(self, item, depth=0):
if item in self.emitted_set:
return
next_depth = depth+1
for d in self.deps(item):
if d in self.emitted_set:
continue
if next_depth >= self.max_depth:
raise AnalyzeError("Topological sort depth exceeded, circular dependency on {}".format(item))
self.process_item(d, next_depth)
self.emit(d)
self.emit(item)
def emit(self, item):
if item not in self.emitted_set:
self.emitted_set.add(item)
self.emitted_list.append(item)
def topological_sort(items, deps, max_depth=20):
#
# Topological sort: Given a set of items, and a function deps(item)
# which gives the dependencies of an item, sort the items such that an item's dependencies precede it.
#
sorter = TopologicalSorter(deps, max_depth)
for item in items:
sorter.process_item(item)
return sorter.emitted_list
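# Usage (illustrative):
#   deps = {'c': ['a', 'b'], 'b': ['a'], 'a': []}
#   topological_sort(['c'], lambda x: deps[x])  ->  ['a', 'b', 'c']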
@dataclass_json
@dataclass
class Schema:
decls: List[Tuple[List[str], parser.Decl]]
info: OrderedDict = field(default_factory=parser.info_factory("Schema"))
class Schemanator:
def __init__(self, tree=()):
self.tree = tree
self.typemap = OrderedDict()
self.schema = None
def build_typemap(self):
builder = TypemapBuilder()
walk(builder, self.tree)
self.typemap = builder.typemap
def check_references(self):
checker = ReferenceChecker(self.typemap)
walk(checker, self.tree)
def find_types(self):
# Not actually used, demo method to demonstrate how to process all typeref nodes
listener = TreeListener()
typerefs = []
def process_typeref(node):
typerefs.append(node.name)
yield
listener.process_typeref = process_typeref
walk(listener, self.tree)
#print("typerefs:", typerefs)
def sort_typemap(self):
tuple_names = [tuple(name) for name in self.typemap.keys()]
deps = lambda x : find_immediate_deps(self.typemap, x)
sorted_tuple_names = topological_sort(tuple_names, deps)
new_typemap = OrderedDict()
for name in sorted_tuple_names:
new_typemap[name] = self.typemap[name]
self.typemap = new_typemap
def create_schema(self):
decls = []
for k, v in self.typemap.items():
decls.append((list(k), v))
self.schema = Schema(decls=decls)
def schemanate(self):
self.build_typemap()
self.check_references()
self.sort_typemap()
self.create_schema()
```
{
"source": "JotinhaBR/Aulas-Python-FC",
"score": 4
}
#### File: Aulas-Python-FC/aula17/buscar.py
```python
from twitter import Twitter
def go():
    ## variables holding the keys and tokens used to log in to the Twitter API
consumer_key = "G7dO3c2VrZI0FPQHDsEQB36YR"
consumer_secret = "<KEY>"
token_key = "<KEY>"
token_secret = "<KEY>"
tw_connect = Twitter(consumer_key, consumer_secret, token_key, token_secret)
    ## ask the user what they want to search for
    pesquisar = input('Search Twitter: ')
j_req = tw_connect.buscar(pesquisar)
    ## print the results to the screen
for tweets in j_req['statuses']:
print(' ')
        print('Tweet from user (', tweets['user']['screen_name'], ')')
print(tweets['text'])
print(' ')
```
#### File: Aulas-Python-FC/aula9/mostro.py
```python
class Mostro():
def __init__(self, nome, altura, musculo):
self.nome = nome
self.altura = altura
self.musculo = musculo
def ficarGrandePorra(self, ganho):
self.musculo += ganho
```
{
"source": "jotix16/Learning.",
"score": 3
}
#### File: Learning./.folderwise_organization/extract.py
```python
import os
def handle_topic(section):
splited = section.split("\n###")
section_name = splited[0].replace(" ", "") +".md"
assert(not os.path.isfile(section_name))
with open(section_name, "w") as f:
for t in splited[1:]:
f.write("\n###"+t) # add back the ###
def handle_readme(readme):
r_splits = readme.split("\n# Index")
index = r_splits[1].split("\n")
with open("README.md", "w") as f:
        f.write(r_splits[0] + "\n# Index") ## copy verbatim up to and including "# Index"
section = ""
for line in index[0:]:
start = line.find("(")
end = line.find(")")
name = line[start+1:end]
if end == start: # no linking
pass
elif line[0]=="-": # Topic, we can get file from
section = line[line.find("[")+1 : line.find("]")].replace(" ","") + ".md" ## get the file name
line = line[:start] + "(" + section + ")"
else:
line = line[:start] + "(" + section + name + ")" ## link to section in file
f.write(line+"\n") # add back the "\n"
# Read the README.md file we are extracting from
file_ = open('README.md','r') # read
data = file_.read()
file_.close()
os.system('rm README.md')
# Part that is going to README.md
splitData = data.split("\n## ")
handle_readme(splitData[0])
# Topics == Seperate Files
for i in splitData[1:]:
handle_topic(i)
```
{
"source": "jotomicron/phd-thesis",
"score": 3
}
#### File: images/incubator/pdb-plot.py
```python
import matplotlib
matplotlib.use('agg')
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', family='sans-serif')
matplotlib.rc('text.latex', preamble=r"\usepackage{lmodern}")
import matplotlib.pyplot as plt
import math
def pdb_stats():
fig, ax = plt.subplots()
fig.set_size_inches(5.5, 3.5)
data = []
curr = 0
with open('pdb-stats.txt') as f:
for line in f:
fields = line.rstrip('\n').split()
x = int(fields[0])
y = int(fields[2])
data.append((x, y))
xs, ys = zip(*data)
ys = [i / 1000 for i in ys]
ax.bar(xs, ys, 0.6, color='0.75', edgecolor='0.5', linewidth=0.5)
# # Exponential trend with linearly decaying exponent
# k, m, b = 0.78381204484429345, -0.0047458845051429489, 0.48030313137331598
# trend = [
# k / 1000 * math.exp(m * (x - 1972) * (x - 1972) + b * (x - 1972))
# for x in xs
# ]
# ax.plot(xs, trend, color='0', linewidth=2)
# # Sigmoidal trend
# L, k, x0 = 155.22876, 0.22066, 2010.83306
# trend = [L / (1 + math.exp(-k * (x - x0))) for x in xs]
# ax.plot(xs, trend, color='0', linewidth=2)
# Regular exponential trend
trend = [
8.813665325E-134 * math.exp(1.544870016E-1*x)
for x in xs
]
ax.plot(xs, trend, color='0', linewidth=2)
ax.set_xlim((1970, 2020))
ax.set_ylim((0, 119))
ax.set_xlabel('Year')
ax.set_ylabel('\# PDB structures (in thousands)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.spines['bottom'].set_linewidth(0.5)
ax.tick_params(axis='y', which='both', length=0)
ax.yaxis.grid(which='both', color='0.75', linestyle='-')
ax.set_axisbelow(True)
labels = list(range(1975, 2016, 5))
ax.set_xticks([i + 0.3 for i in labels])
ax.tick_params(axis='x', which='both', length=0)
ax.set_xticklabels(labels, rotation=90)
plt.tight_layout()
plt.tight_layout()
fig.savefig('../pdb-stats.pdf')
plt.close()
pdb_stats()
```
{
"source": "jototland/example_python_package",
"score": 2
}
#### File: example_python_package/tests/test_hello.py
```python
from mypackage import hello
def test_hello(capsys):
hello()
written = capsys.readouterr()
assert 'Hello, World!' in written.out
assert 'hello.txt' in written.out
```
{
"source": "JotraN/reddit-bookmarks",
"score": 3
}
#### File: JotraN/reddit-bookmarks/reddit-bookmarks.py
```python
import praw
import os
from datetime import datetime
from getpass import getpass
from urllib.parse import urlparse
def download_saved_submissions(reddit_user, reddit_pass):
userAgent = ('Saved Posts Downloader')
reddit = praw.Reddit(user_agent=userAgent,
client_id=os.environ['RED_SAVED_CLIENT_ID'],
client_secret=os.environ['RED_SAVED_CLIENT_SECRET'],
username=reddit_user,
                         password=reddit_pass)
return [saved for saved in reddit.user.me().saved()
if isinstance(saved, praw.models.Submission)]
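# Note: download_saved_submissions expects the praw OAuth credentials to be set
# in the RED_SAVED_CLIENT_ID and RED_SAVED_CLIENT_SECRET environment variables
# (see the praw.Reddit() call above).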
def remove_reddit_submissions(submissions):
return [submission for submission in submissions
if urlparse(submission.url).hostname != 'www.reddit.com']
def write_to_bookmarks(bookmarks_path, submissions):
    with open(f'{bookmarks_path}bookmarks.md', 'w') as f:
        for submission in submissions:
            f.write(f'**{submission.title}** \n')
            f.write(f'[{submission.url}]({submission.url}) \n')
            f.write(' \n')
def add_commit_push_bookmarks(repo_path):
curr_dir = os.getcwd()
os.chdir(repo_path)
os.system('git add bookmarks.md')
os.system(f'git commit -m "{str(datetime.now())}"')
os.system('git push origin master')
os.chdir(curr_dir)
def main():
reddit_user = input('Enter reddit username: ')
reddit_pass = getpass(f'Enter reddit password ({reddit_user}): ')
submissions = download_saved_submissions(reddit_user, reddit_pass)
submissions = remove_reddit_submissions(submissions)
bookmarks_path = 'bookmarks/'
write_to_bookmarks(bookmarks_path, submissions)
add_commit_push_bookmarks(bookmarks_path)
if __name__ == '__main__':
main()
``` |
{
"source": "jotron/Simulation",
"score": 4
} |
#### File: Simulation/code/credit.py
```python
import sys
import pygame
from pygame.locals import *
from tkinter import *
from random import *
import settings as s
pygame.init()
pygame.display.set_caption('credits')
screen = pygame.display.set_mode((800, 600))
screen_r = screen.get_rect()
font = pygame.font.SysFont("Arial", 40)
clock = pygame.time.Clock()
def game():
def do_event(event):
print("{},{}".format(event.x,event.y))
def jump(event):
app.hello_b.place(relx=random(),rely=random())
class App:
def __init__(self,master):
frame = Frame(master)
master.geometry("800x800")
master.title("Quit the programm please !")
master.bind("<Button-1>",do_event)
master.bind("<Button-1>",do_event)
frame.pack()
self.hello_b = Button(master,text="Quit",command=sys.exit)
self.hello_b.bind("<Enter>",jump)
self.hello_b.pack()
root = Tk()
app = App(root)
root.mainloop()
def credit():
credit_list = ["The Simulation"," ","Directed by:","<NAME> and <NAME>","", "Created by:","","Amin","Joel","Simon","","","","","","","","You wait until the end nice","Wait a bit..","","","","","","ps: It's possible to quit the programme"]
texts = []
# Render the text
for i, line in enumerate(credit_list):
s = font.render(line, 1, (10, 10, 10))
# Create a Rect for each Surface.
r = s.get_rect(centerx = screen_r.centerx, y = screen_r.bottom + i * 45)
texts.append((r, s))
while True:
for e in pygame.event.get():
if e.type == QUIT or e.type == KEYDOWN and e.key == pygame.K_ESCAPE:
return
screen.fill((180,180,180))
for r, s in texts:
# now we just move each rect by one pixel each frame
r.move_ip(0, -1)
# and drawing is as simple as this
screen.blit(s, r)
# if all rects have left the screen, we exit
if not screen_r.collidelistall([r for (r, _) in texts]):
game()
return
pygame.display.flip()
        # cap the framerate at 170 ticks per second
        clock.tick(170)
credit()
```
#### File: code/pieces/runge-kutta.py
```python
from math import sqrt
"""Define method of Runge Kutta 4."""
def RK(f):
    # classic fourth-order Runge-Kutta step: dy = (k1 + 2*k2 + 2*k3 + k4)/6
    return lambda t, y, dt: (
            lambda dy1: (
            lambda dy2: (
            lambda dy3: (
            lambda dy4: (dy1 + 2*dy2 + 2*dy3 + dy4)/6
            )( dt * f( t + dt  , y + dy3   ) )
            )( dt * f( t + dt/2, y + dy2/2 ) )
            )( dt * f( t + dt/2, y + dy1/2 ) )
            )( dt * f( t       , y         ) )
"""define function: y(t)"""
def y(t):
return (y(t))
"""define function: y'(t) that applies RK(f)"""
dy = RK(lambda t, y: y'(t))
"""define values for t0, y0, dt"""
t, y, dt = 1., 2., 1.
"""define number of while loop"""
while t <= 5:
print("y(%0.1f)\t= %6.2f " % ( t, y))
"""define new values for y(t) and t to begin a new while loop"""
t, y = t + dt, y + dy( t, y, dt )
``` |
{
"source": "jotruebl/Pyce_Tools",
"score": 3
} |
#### File: Pyce_Tools/pyce_tools/pyce_tools.py
```python
import pandas as pd
import math
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import os
import numpy
from openpyxl import load_workbook
from openpyxl.utils.dataframe import dataframe_to_rows
import datetime
import scipy.stats as stats
from pandas.tseries.offsets import DateOffset  # used in clean_magic
class inp(object):
var_names_uway = {
'TRIPLET_TripletBeta660' : 'Triplet Beta',
'TRIPLET_TripletCDOM' : 'CDOM',
'TRIPLET_TripletChl' : 'Chl-a',
'prokaryoticpico-syne' : 'Prok. Pico. Syn.',
'nanophyto2-20um' : 'Nanophytoplankton',
'picophyto<2um' : 'Picophytoplankton',
'chla_cdom': 'chla/cdom',
'SB21_SB21sal':'salinity'
}
def __init__(self, inp_type, inp_location, cyto_location, cyto_data, uway_bio_data, inp_data):
'''
Description
------------
INP object. Will consist of INP of a specific type and location and bio data collected from a specific location.
For example, an object named inp_uway would have inp of type seawater from location uway and cyto and uway bio data from uway. If you wanted to compare this
INP data with cyto from a different location, a new INP object would need to be made.
Parameters
------------
self : obj
The INP object.
inp_type : str
The sample type for which this sample was collected. [seawater, aerosol]
inp_location : str
Where sample was collected. [uway, ASIT, wkbtsml, wkbtssw, bubbler, coriolis]
        cyto_location : str
            String used to filter the cyto_data df by its location column.
cyto_data : df
A dataframe of cyto data. Each row is an observation.
Index should be named 'datetime' and columns are whatever measurements were taken. A location column must be present as it is indexed.
uway_bio_data : df
A dataframe of bio data. Each row is an observation.
Index should be named 'datetime' and columns are whatever measurements were taken.
inp_data : df
A dataframe of INP data. Each row is an observation.
Index should be named 'datetime'. Columns should include process, filtered, location, type, temp, concentration (inp/ml or inp/l) and uncertainty.
'''
self.inp_type = inp_type
self.inp_location = inp_location
self.uway_bio = uway_bio_data
self.cyto_bio = cyto_data[cyto_data['location']==cyto_location]
self.inp = inp_data[(inp_data['location']==inp_location)&(inp_data['type']==inp_type)]
self.results = {}
def corr(self, data, regime, temp):
stat = pd.DataFrame()
n = pd.DataFrame()
pInput = pd.DataFrame()
statCombined = pd.DataFrame()
nCombined = pd.DataFrame()
regime = str(regime)
df=data
df = df.select_dtypes(exclude=['object'])
for column in df:
if column == regime:
continue
try:
stat = pd.DataFrame()
pInput = df.loc[:,[regime,column]].dropna()
ptest = stats.pearsonr(pInput[regime],pInput[column])
stat[column]=ptest
n = pInput.shape[0]
stat=stat.append({column:n}, ignore_index=True)
statT = stat.T
statCombined = statCombined.append(statT)
except ValueError:
continue
statCombined['variable'] = statCombined.index
        # Drop the temperature columns (their names start with '-')
        statCombined['variable'] = statCombined['variable'].astype(str)
        statCombined = statCombined[~statCombined['variable'].str.startswith('-')]
# Calculate R^2 and name columns
statCombined = statCombined.rename(columns={0:'R',1:'p', 2:'n'})
statCombined['R^2'] = statCombined['R']**2
# Add information
statCombined['inp_temp']=temp
return(statCombined, df)
def sa_normalize(self, dA_total):
        '''Takes dA_total and the object's INP data and calculates surface-area-normalized INP in units of INP/cm2.
        INPUT
        dA_total: total surface area integrated across all particle sizes for each day. Index should be a day number and SA should be in units of um2/cm3. If the index is unnamed, the function names it 'day'.
        inp: INP/L of air. Columns should only be temperatures. Any other identifying information should be in the index. Index needs to include day. '''
# ensure inp type is correct
if self.inp_type != 'aerosol':
return print('This function only works for aerosol type INP.')
# rename dA_total index if needed
if dA_total.index.name != 'day':
dA_total.index.rename('day', inplace=True)
# add a column of day
if 'day' not in self.inp.columns:
self.inp['day'] = self.inp.index.day.astype(str)
        # convert surface area from um^2/cm^3 to cm^2/m^3 (1 um^2 = 1e-8 cm^2; 1 cm^-3 = 1e6 m^-3)
        dA_total_prep = dA_total*1e-8*1e6
# merge and set index
self.inp = pd.merge(self.inp.reset_index(), dA_total_prep.reset_index(), on='day', how='outer')
# calculate
self.inp['inp_sa_normalized'] = self.inp['inp/m^3'].div(self.inp['SA'])
return print('done')
def correlations(self, temps, process, inp_units, dfs=None, size=None):
self.results.update({process:{}})
for temp in temps:
self.results[process].update({temp:{}})
# sort index
self.inp = self.inp.sort_index()
self.uway_bio = self.uway_bio.sort_index()
self.cyto_bio = self.cyto_bio.sort_index()
# merge INP and uway bio dataframes on date. This process is slightly different if the INP type is aerosol
if self.inp_type =='aerosol':
inp_uway_bio = pd.merge_asof(self.inp[(self.inp['temp']==temp)&(self.inp['process']==process)&(self.inp['size']==size)][inp_units], self.uway_bio, left_index=True, right_index=True, direction='nearest')
else:
inp_uway_bio = pd.merge_asof(self.inp[(self.inp['temp']==temp)&(self.inp['process']==process)][inp_units], self.uway_bio, left_index=True, right_index=True, direction='nearest')
# merge INP with cyto dataframes on date
inp_uway_bio_cyto_bio = pd.merge_asof(inp_uway_bio, self.cyto_bio, left_index=True, right_index=True, direction='nearest')
df_corr = inp_uway_bio_cyto_bio
# if any dfs were given, merge them here
if dfs:
for df in dfs:
df = df.sort_index()
df_corr = pd.merge_asof(df_corr, df, left_index=True, right_index=True, direction='nearest')
[corrs, data_combined] = self.corr(df_corr, inp_units, temp)
self.results[process][temp].update({'corrs':corrs})
self.results[process][temp].update({'data':data_combined})
print(f'Calculating correlations {process} INP samples of type={self.inp_type} at {temp}...Done!')
def plot_corr_scatter(self, temp, units, processes, row_num):
colors = ['steelblue','firebrick']
tick_loc='inside'
tick_width=2
tick_length=10
line_color='black'
line_width=2
xticks=10
x_tick_angle=45
x_tick_font_size=16
x_tick_font_color='black'
x_title_size=20
x_title_color='black'
yticks=10
y_tick_angle = 90
y_tick_font_size=16
y_tick_font_color = "black"
y_title_size=20
y_title_color="black"
vert_space = .10
horz_space = .095
# rows_needed = round(len(self.var_names_uway.keys())/3) + 1
rows_needed = row_num
fig = make_subplots(rows = rows_needed, cols = 3, shared_xaxes=False, shared_yaxes=False,
horizontal_spacing = horz_space, vertical_spacing=vert_space, print_grid=False
)
row_num = 1
col_num = 1
for variable in self.var_names_uway:
ytitle=f'INP<sub>{self.inp_location}</sub>'
xtitle = variable
i = 0
for temperature in temp:
for process in processes:
datay_string = temperature
datax = self.results[process][temperature]['data'].dropna(subset=[units, variable])
fig.add_trace(go.Scatter(
name = process,
y= datax[units],
x= datax[variable],
mode="markers", connectgaps=True,
marker=dict(color=colors[i], symbol='circle-open')),
row=row_num,
col = col_num)
data_trendline=self.results[process][temperature]['data'].dropna(subset=[variable, units])
fig_trend = px.scatter(data_trendline, x=variable, y=units, trendline="ols")
trendline = fig_trend.data[1]
fig_trend.update_traces(line=dict(color=colors[i],dash='dash'))
fig.add_trace(go.Scatter(trendline), row=row_num, col=col_num)
fig.update_xaxes(row=row_num, col = col_num,
title=dict(text=xtitle, font=dict(size=x_title_size, color=x_title_color)),
exponentformat='power', tickangle = x_tick_angle,
ticks=tick_loc, nticks=xticks, tickwidth=tick_width, ticklen=tick_length, showline=True,
linecolor=line_color,linewidth=line_width,
tickfont=dict(size=x_tick_font_size, color=x_tick_font_color))
fig.update_yaxes(row=row_num, col = col_num,
title=dict(text=ytitle,font=dict(size=y_title_size, color=y_title_color)),
exponentformat='power',
ticks=tick_loc, nticks=10, tickwidth=tick_width, ticklen=tick_length,
tickfont=dict(size=y_tick_font_size, color=y_tick_font_color),
showline=True, linecolor=line_color, linewidth=line_width)
fig.update_traces(marker=dict(size=10,
line = dict(width=2, color='DarkSlateGrey')))
                    # annotate with R^2; bold only if the correlation is significant (p < .05)
if self.results[process][temperature]['corrs'].loc[variable,'p'] < .05:
anno_text = '<b>R<sup>2</sup>=' + str(round(self.results[process][temperature]['corrs'].loc[variable,'R^2'],2))
elif self.results[process][temperature]['corrs'].loc[variable,'p'] > .05:
anno_text = 'R<sup>2</sup>=' + str(round(self.results[process][temperature]['corrs'].loc[variable,'R^2'],2))
x_val = [0.01, 0.41, 0.80]
y_val = round((1-(row_num-1)*(1/(rows_needed-.35)))-i*.035,3)
fig.add_annotation(
showarrow=False,
xref="paper",
yref="paper",
x=x_val[col_num-1],
y=y_val,
text = anno_text,
font=dict(size=18, color=colors[i]))
fig.update_layout(width=1500, height=400*rows_needed, template='plotly_white',showlegend=False, title=f'Correlation Scatter Plots for {self.inp_location} at {temperature}')
i+=1
col_num+=1
if col_num%4 == 0:
col_num=1
row_num +=1
#fig.write_image(selection+'.png', scale=3)
#fig.show()
return fig
def plot_ins_inp(self):
if self.inp_type != 'aerosol':
return print('INP object must be aerosol type. To plot seawater type INP, use plot_sml_inp or plot_ssw_inp.')
fig=go.Figure()
fig.add_trace(
go.Scatter(name='Unheated',
x=self.inp[self.inp['process'] == 'UH']['temp'],
y=self.inp[self.inp['process'] == 'UH']['inp_sa_normalized'],
mode='markers',
marker=dict(size=7, color='blue',line = dict(width=1, color='black'))))
fig.add_trace(
go.Scatter(name='Heated',
x=self.inp[self.inp['process'] == 'H']['temp'],
y=self.inp[self.inp['process'] == 'H']['inp_sa_normalized'],
mode='markers',
marker=dict(size=7, color='red',line = dict(width=1, color='black'))))
fig.add_trace(
go.Scatter(name='DeMott et al. (2016) Caribbean, Arctic, Pacific, Bering Sea - Ambient',
x=[-13, -25, -15, -13],
y=[1, 900, 20, 1],
mode="lines", connectgaps=True, line=dict(color="tomato", width=1.5))
)
fig.add_trace(
go.Scatter(name='DeMott et al. (2016) SIO Pier seawater - Waveflume/MART',
x=[-15, -25, -23, -13, -15],
y=[.2, 40, 100, 1, .2],
mode="lines", connectgaps=True)
)
fig.add_trace(
go.Scatter(name='McCluskey et al. (2017) SIO Pier seawater - Waveflume',
x=[-10, -23, -30, -30, -10, -10],
y=[1.5, 20, 300, 500, 70, 1.5],
mode="lines", connectgaps=True, line=dict(color="deepskyblue", width=1.5))
)
fig.add_trace(
go.Scatter(name='McCluskey et al. (2018b) Southern Ocean - Ambient',
x=[-12, -18, -26, -26, -12],
y=[.4, 0.2, 100, 8000, 0.4],
mode="lines", connectgaps=True, line=dict(color="darkgoldenrod", width=1.5))
)
fig.add_trace(
go.Scatter(name='Gong et al. (2020) Cabo Verde - Ambient',
x=[-10, -11, -20, -19, -10],
y=[2, .3, 12, 100, 2],
mode="lines", connectgaps=True, line=dict(color="seagreen", width=1.5))
)
fig.update_yaxes(type='log', title='INP per cm<sup>2</sup> of SSA Surface (D<sub>p</sub> = 10-500nm)', exponentformat='power')
fig.update_xaxes( title='Temperature (\u00B0C)',range=[-30.5,-5], exponentformat='power')
fig.update_yaxes(linewidth=2, tickwidth=2, ticklen=10, ticks='inside',
showline=True,linecolor='black', tickfont=dict(size=16), title_font=dict(size=16))
fig.update_xaxes(linewidth=2, tickwidth=2, ticklen=10,
ticks='inside', showline=True, linecolor='black', tickfont=dict(size=16), title_font=dict(size=16))
fig.update_layout(template='plotly_white',
width=700, height=600,
showlegend=True, legend=dict(title='', font=dict(size=10, color='black'), x=0.2, y=1.2, bordercolor="black", borderwidth=1))
return fig
def calculate_raw_blank(type_, process, location, sample_name, collection_date, analysis_date, issues, num_tubes, vol_tube = 0.2, rinse_vol = 20, size = None):
'''
Loads raw data from LINDA BLANK experiments and creates a 'calculated' INP data file using given arguments.
Saves the output as an XLSX file which can be later used as the blank in sample calculations of LINDA experiments.
Parameters
------------
type_ : str
The sample type for which this blank was collected. Note that mq is for any seawater samples, whether they be from the underway or workboat. [aerosol, mq]
process : str
Identify whether the sample has been left unfiltered (uf) or filtered (f). Unheated (UH) and heated (H) processes are already included in the file. [uf,f]
location : str
[bubbler, coriolis, mq, mq_wboat]
sample_name : str
Sample source name as seen on the vial. There is no rule on this, as it is simply stored as metadata, but ideally the sample name is following some kind of consistent standard.
collection_date : str
Sample collection date. Make note of your timezone, as the code does not assume one.
This is used to help locate the raw data file. [DDMMYYYY HHhMM].
analysis_date : str
Date of LINDA analysis in NZST time. [DD/MM/YY]
issues : str
Issues noted in the LINDA analysis log. [DEFAULT = None]
num_tubes : int
Number of tubes per heated/unheated analysis. [DEFAULT = 26]
vol_tube : int
Volume in ml of sample solution per tube. [DEFAULT = 0.2]
rinse_vol : int
Volume in ml of mq water used for rinsing filters, if the sample type makes use of a filter.
size : str
Size of particles for filter samples if sample was size resolved. [super, sub]
Returns
------------
xlsx
A spreadsheet of calculated blank data.
Notes
------------
| raw input data: \\[PROJECT_ROOT]\\data\\raw\\IN\\blank\\[FILE]
| calculated output file: \\[PROJECT_ROOT]\\data\\interim\\IN\\calculated\\blank\\[FILE]
'''
# load raw data depending on sample type
if type_ == 'mq':
date = collection_date[:6]
inpath = '..\\data\\raw\\IN\\blank\\' + location + '_'+ 'blank' + '_' + process + '_' + date + '.csv'
template = pd.read_excel('..\\in_calculation_template.xlsx', skiprows=1)
if type_ == 'aerosol':
date = collection_date[:6]
inpath = '..\\data\\raw\\IN\\blank\\' + location + '_'+ 'blank' + '_' + process + '_' + size + '_' + date + '.csv'
template = pd.read_excel('..\\in_calculation_template_aerosols.xlsx', skiprows=1)
raw = pd.read_csv(inpath, sep = ' ', header = None, parse_dates=False)
# create a datetime df to split up the date and time into separate columns, then insert them into the raw spreadsheet
datetime_col = raw[0].str.split(' ', expand=True)
raw.insert(0, 'day',datetime_col[0])
raw[0]=datetime_col[1]
# drop the random extra column at the end
raw=raw.drop(columns=61)
# set the column names so they match those in the template
raw.columns = template.columns
# create metadata dict depending on sample type
if type_ == 'mq_wboat' or type_ == 'mq':
meta_dict = {
'raw data source': inpath,
'type':type_,
'location':location,
'process':process,
'sample source name': sample_name,
'sample collection date': collection_date,
'sample analysis date': analysis_date,
'# tubes': num_tubes,
'ml/tube': vol_tube,
}
if type_ == 'aerosol':
meta_dict = {
'raw data source': inpath,
'type':type_,
'location':location,
'process':process,
'sample source name': sample_name,
'sample collection date': collection_date,
'sample analysis date': analysis_date,
'# tubes': num_tubes,
'ml/tube': vol_tube,
'rinse volume': rinse_vol,
'size': size,
}
# insert the raw data into the template
if type_ == 'mq_wboat' or type_ == 'mq':
template = load_workbook('..\\in_calculation_template.xlsx')
if type_ == 'aerosol':
template = load_workbook('..\\in_calculation_template_aerosols.xlsx')
template.remove(template["data.csv"])
sheet = template.create_sheet('data.csv')
for row in dataframe_to_rows(raw, index=False, header=True):
sheet.append(row)
sheet.insert_rows(idx=0)
# add metadata to spreadsheet
if type_ == 'aerosol':
row = 0
for key, value in meta_dict.items():
template['summary_UF_UH']['v'][row].value = key
template['summary_UF_UH']['w'][row].value = value
template['summary_UF_H']['v'][row].value = key
template['summary_UF_H']['w'][row].value = value
row +=1
if type_ =='mq_wboat' or type_ =='mq':
row = 0
for key, value in meta_dict.items():
template['summary_UF_UH']['z'][row].value = key
template['summary_UF_UH']['aa'][row].value = value
template['summary_UF_H']['z'][row].value = key
template['summary_UF_H']['aa'][row].value = value
row +=1
# save calculated report file to the appropriate folder
if type_ == 'mq_wboat' or type_ == 'mq':
template.save('..\\data\\interim\\IN\\calculated\\blank\\'+type_ + '_'+'blank'+'_' + process + '_' + date+'_calculated.xlsx')
if type_ == 'aerosol':
template.save('..\\data\\interim\\IN\\calculated\\blank\\'+location + '_'+'blank'+'_' + process + '_' + size + '_'+ date + '_calculated.xlsx')
return print('...Raw blank data calculated!')
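# Example usage (values are hypothetical, following the parameter formats above):
# calculate_raw_blank(type_='mq', process='uf', location='mq',
#                     sample_name='TAN2003 MQ blank', collection_date='26032020 10h00',
#                     analysis_date='26/03/20', issues='None', num_tubes=26)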
def calculate_raw(blank_source, type_, location, process, sample_name,
collection_date, analysis_date, issues, num_tubes, vol_tube = 0.2, rinse_vol = 20, size = None,
flow_start = None, flow_stop = None, sample_stop_time = None):
'''
Calculates raw data.
Creates an XLSX spreadsheet of blank corrected, calculated INP data for samples using given args.
Resulting spreadsheet has a seperate tab for unheated and heated samples, with respective metadata in each.
Saves the output to interim calculated folder --> data/interim/IN/calculated/[seawater or aerosols].
Parameters
----------
blank_source : str
Path to source of calculated blank data. Currently accepts one file, but need to account for average of multiple files.
type_ : str
The sample type for which this blank was collected. [seawater, aerosol]
location : str
Where sample was collected. [uway, wboatsml, wboatssw, bubbler, coriolis]
process : str
Identify whether the sample has been unfiltered or filtered. Unheated and heated processes are already included in the file. [uf,f]
sample_name : str
sample source name as seen on the vial.
collection_date : str
sample collection date in NZST. [DDMMYYYY HHhMM]
analysis_date : str
LINDA analysis date in NZST. [DD/MM/YY]
issues : str
Issues noted in the LINDA analysis log. [DEFAULT = None]
num_tubes : int
Number of tubes per heated/unheated analysis. [DEFAULT = 26]
vol_tube : int
Volume in ml of sample solution per tube. [DEFAULT = 0.2]
rinse_vol : int
Volume in ml of mq water used for rinsing filters (aerosol sample types only).
size : str
Size of particles for filter samples (if applicable). Defaults to None if not given. Only used for aerosol samples. [super, sub]
flow_start : float
Flow rate in liters per minute (LPM) at start of sampling. Only used for aerosol samples.
flow_stop : float
Flow rate in LPM at end of sampling. Only used for aerosol samples.
sample_stop_time : str
Time in NZST at which sample collection was halted. Only valid for aerosol collections. [DDMMYYYY HHhMM]
Notes
------------
raw input data: \\[PROJECT_ROOT]\\data\\raw\\IN\\[SAMPLE_TYPE]\\[FILE]
calculated output file: \\[PROJECT_ROOT]\\data\\interim\\IN\\calculated\\[SAMPLE_TYPE]\\[SAMPLE_LOCATION]\\[FILE]
Examples
---------
Below is an example for a seawater INP sample collected from the workboat ssw.
>>> issues = 'Exact timing of sample collection on workboat is unknown; only that it was around 8-10 am.'
>>> blank_source = '..\\data\\interim\\IN\\calculated\\blank\\mq_wboat_blank_uf_260320_calculated.xlsx'
>>> type_ = 'seawater'
>>> location = 'wboatssw'
>>> process = 'uf'
>>> sample_name = 'TAN2003 IN 8b'
>>> collection_date = '23032020 08h00'
>>> analysis_date = '08/12/2020'
>>> num_tubes = 26
>>> vol_tube = 0.2
>>> raw = pt.calculate_raw(blank_source, type_, location, process, sample_name, collection_date, analysis_date, issues, num_tubes, vol_tube)
...IN data calculated!
Calculated report file saved to ..\data\interim\IN\calculated\seawater\wboat_ssw\seawater_wboatssw_uf_230320_0800_calculated.xlsx.
Below is an example for INP data from the bubbler.
>>> issues = 'None.'
>>> blank_source = '..\\data\\interim\\IN\\calculated\\blank\\bubbler_blank_uf_super_day10_day11_calculated_avg.xlsx'
>>> type_ = 'aerosol'
>>> size = 'super'
>>> location = 'bubbler'
>>> process = 'uf'
>>> sample_name = 'Day 09 bubbler 3 stage PC'
>>> collection_date = '25032020 11h45'
>>> sample_stop_time = '26032020 08h52'
>>> analysis_date = '31/07/2020'
>>> num_tubes = 26
>>> vol_tube = 0.2
>>> flow_start = 10.50
>>> flow_stop = 9.35
>>> rinse_vol = 20
>>> raw = pt.calculate_raw(blank_source, type_, location, process, sample_name, collection_date, analysis_date, issues, num_tubes, vol_tube, rinse_vol, size, flow_start, flow_stop, sample_stop_time)
...IN data calculated!
Calculated report file saved to ..\data\interim\IN\calculated\\aerosol\\bubbler\\aerosol_bubbler_uf_super_250320_1145_calculated.xlsx.
'''
# Dictionary for mapping actual datetime of sample to the variable collection_date. Used for locating files (due to my poor file naming scheme) and can be ignored by anyone not working with
# coriolis samples from the 2020 S2C Tangaroa Cruise.
coriolis_day_date = {
'18032020 13h00':'tg_04',
'21032020 13h00':'tg_6',
'25032020 14h00':'tg_9-2',
'22032020 14h00':'tg_7',
'23032020 13h00':'tg_8-2',
'19032020 15h00':'tg_5-7',
}
# load raw data depending on sample type (seawater vs aerosol)
if type_ == 'seawater':
# extract date and time from input parameters
date = collection_date[:6]
time = collection_date[9:11]+collection_date[12:]
# use input parameters to build path to source file
inpath = '..\\data\\raw\\IN\\' + type_ + '\\' + type_ + '_' + location + '_' + process + '_' + date + '_' + time + '.csv'
# load analysis template
template = pd.read_excel('..\\in_calculation_template.xlsx', skiprows=1)
if type_ == 'aerosol':
if location == 'bubbler':
# extract date and time from input parameters
date = collection_date[:6]
time = collection_date[9:11]+collection_date[12:]
# use input parameters to build path to source file
inpath = '..\\data\\raw\\IN\\' + type_ + '\\' + type_ + '_' + location + '_' + process + '_' + size + '_'+ date + '_' + time + '.csv'
# load analysis template
template = pd.read_excel('..\\in_calculation_template_aerosols.xlsx', skiprows=1)
if location == 'coriolis':
# extract date and time from input parameters
date = coriolis_day_date[collection_date]
# use input parameters to build path to source file
inpath = '..\\data\\raw\\IN\\' + type_ + '\\' + type_ + '_' + location + '_' + process + '_' + date + '.csv'
# load analysis template
template = pd.read_excel('..\\in_calculation_template_aerosols.xlsx', skiprows=1)
# read in the raw data csv
raw = pd.read_csv(inpath, sep = ' ', header = None, parse_dates=False)
# create a datetime df to split up the date and time into separate columns, then insert them
datetime_col = raw[0].str.split(' ', expand=True)
raw.insert(0, 'day',datetime_col[0])
raw[0]=datetime_col[1]
# drop the extra empty column at the end of every data file
raw=raw.drop(columns=61)
# set the column names so they match those in the template
raw.columns = template.columns
# create metadata dict depending on sample type
if type_== 'seawater':
meta_dict = {
'raw data source': inpath,
'type':type_,
'location':location,
'process':process,
'sample source name': sample_name,
'sample collection date': collection_date,
'sample analysis date': analysis_date,
'# tubes': num_tubes,
'ml/tube': vol_tube,
'issues':issues,
'sigma' : 1.96
}
if type_ == 'aerosol':
# Calculate sampling time in minutes
t_start=datetime.datetime.strptime(collection_date, '%d%m%Y %Hh%M')
t_end=datetime.datetime.strptime(sample_stop_time, '%d%m%Y %Hh%M')
sample_time = t_end - t_start
sample_time_mins = sample_time.total_seconds()/60
# Create metadata dictionary
meta_dict = {
'raw data source': inpath,
'type':type_,
'location':location,
'process':process,
'sample source name': sample_name,
            'sample collection date': collection_date + ' through ' + sample_stop_time,
'sample analysis date': analysis_date,
'# tubes': num_tubes,
'ml/tube': vol_tube,
'rinse volume': rinse_vol,
'size': size,
'avg_flow': (flow_start+flow_stop)/2,
'sample collection time (minutes)': sample_time_mins
}
meta_dict['total air volume'] = meta_dict['avg_flow'] * meta_dict['sample collection time (minutes)']
meta_dict['issues'] = issues
meta_dict['sigma'] = 1.96
# insert the raw data into the template
if type_ == 'seawater':
template = load_workbook('..\\in_calculation_template.xlsx')
if type_ == 'aerosol':
template = load_workbook('..\\in_calculation_template_aerosols.xlsx')
template.remove(template["data.csv"])
sheet = template.create_sheet('data.csv')
for row in dataframe_to_rows(raw, index=False, header=True):
sheet.append(row)
sheet.insert_rows(idx=0)
# add metadata to spreadsheet - one in each of the two process sheets (UF_UH and UF_H)
if type_ == 'seawater':
row = 0
for key, value in meta_dict.items():
template['summary_UF_UH']['z'][row].value = key
template['summary_UF_UH']['aa'][row].value = value
template['summary_UF_H']['z'][row].value = key
template['summary_UF_H']['aa'][row].value = value
row += 1
if type_ == 'aerosol':
row = 0
for key, value in meta_dict.items():
template['summary_UF_UH']['v'][row].value = key
template['summary_UF_UH']['w'][row].value = value
template['summary_UF_H']['v'][row].value = key
template['summary_UF_H']['w'][row].value = value
row += 1
# read and add blank data.
blank_uf_uh = pd.read_excel(blank_source, sheet_name='summary_UF_UH')
blank_uf_h = pd.read_excel(blank_source, sheet_name='summary_UF_H')
row = 1
for x in blank_uf_uh['N(frozen)']:
template['summary_UF_UH']['f'][row].value = x
row += 1
row = 1
for x in blank_uf_h['N(frozen)']:
template['summary_UF_H']['f'][row].value = x
row += 1
# Save output depending on IN type
if type_ == 'seawater':
template.save('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+type_ + '_' + location + '_' + process + '_' + date + '_' + time +'_calculated.xlsx')
save_path = '..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+type_ + '_' + location + '_' + process + '_' + date + '_' + time +'_calculated.xlsx'
if type_ == 'aerosol':
if location == 'bubbler':
template.save('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+type_ + '_' + location + '_' + process + '_' + size + '_' + date + '_' + time + '_calculated.xlsx')
save_path = '..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+type_ + '_' + location + '_' + process + '_' + size + '_' + date + '_' + time + '_calculated.xlsx'
if location == 'coriolis':
template.save('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+type_ + '_' + location + '_' + process + '_' + date+'_calculated.xlsx')
save_path = '..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+type_ + '_' + location + '_' + process + '_' + date+'_calculated.xlsx'
return print(f'...IN data calculated!\nCalculated report file saved to {save_path}.')
def clean_calculated_in(type_, location):
'''
Creates an XLSX spreadsheet of cleaned data ready for analysis.
Parameters
------------
type_ : str
The sample type for which this blank was collected. [seawater, aerosol]
location : str
Where sample was collected. [uway, ASIT, wkbtsml, wkbtssw, bubbler, coriolis]
Notes
------------
raw input data: \\[PROJECT_ROOT]\\data\\interim\\IN\\calculated\\[SAMPLE_TYPE]\\[SAMPLE_LOCATION]\\[FILE]
cleaned output file: \\[PROJECT_ROOT]\\data\\interim\\IN\\cleaned\\combinedtimeseries\\[SAMPLE_TYPE]\\[FILE]
'''
# create a dataframe that will contain all of the data from each separate calculated file.
big_df = pd.DataFrame()
# cycle through all calculated report files in the folder
for file in os.listdir('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'):
# account for unheated and heated processes
procs = ['UH','H']
if type_=='seawater':
for process in procs:
# load the file
df = pd.read_excel('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+file, sheet_name='summary_UF_'+process)
                # round temperature values to one decimal place
                df['T (*C)'] =df['T (*C)'].round(1)
# create a df for transforming
current = pd.DataFrame()
# create transposed df and give it appropriate column names
fd = df.T
fd.columns = fd.loc['T (*C)',:]
# add data to dataframe
current= current.append(fd.loc['IN/ml',:], ignore_index=True)
current['datetime'] = fd.iloc[-1,4]
current['time'] = current['datetime'].iloc[0][9:]
# turn columns into strings so they aren't ints
current.columns = current.columns.astype(str)
# add label data
current['process'] = process
current['type'] = type_
current['location'] = location
current['filtered'] = 'uf'
# append this to the final dataframe
big_df=big_df.append(current)
elif type_ == 'aerosol' and location == 'bubbler':
for process in procs:
# load the file
df = pd.read_excel('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+file, sheet_name='summary_UF_'+process)
                # round temperature values to one decimal place
                df['T (*C)'] =df['T (*C)'].round(1)
# create a df for transforming
current = pd.DataFrame()
# create transposed df and give it appropriate column names
fd = df.T
fd.columns = fd.loc['T (*C)',:]
# add data to current
current=current.append(fd.loc['IN/L',:], ignore_index=True)
current['datetime'] = fd.iloc[-1,4][0:14]
current['start_date'] = fd.iloc[-1,4][0:14]
current['stop_date'] = fd.iloc[-1,4][21:]
# turn columns into strings so they aren't ints
current.columns = current.columns.astype(str)
# add label data
if 'super' in file:
current['size'] = 'super'
if 'sub' in file:
current['size'] = 'sub'
current['process'] = process
current['type'] = type_
current['location'] = location
current['filtered'] = 'uf'
# append this to the final big_df
big_df=big_df.append(current)
elif type_=='aerosol' and location == 'coriolis':
for process in procs:
# load the file
df = pd.read_excel('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+file, sheet_name='summary_UF_'+process)
                # round temperature values to one decimal place
                df['T (*C)'] =df['T (*C)'].round(1)
# create a df for transforming
current = pd.DataFrame()
# create transposed df and give it appropriate column names
fd = df.T
fd.columns = fd.loc['T (*C)',:]
# add data to current
current=current.append(fd.loc['IN/L (INP per liter of air)',:], ignore_index=True)
current['date'] = fd.iloc[-1,4]
current['hour'] = current['date'].iloc[0][9:]
current['start_date'] = current.loc[0,'date'][0:14]
current['stop_date'] = current.loc[0,'date'][23:]
# add label data
if 'super' in file:
current['size'] = 'super'
if 'sub' in file:
current['size'] = 'sub'
# turn columns into strings so they aren't ints
current.columns = current.columns.astype(str)
current['process'] = process
# append this to the final big_df
big_df=big_df.append(current)
# save output to combined time series folder
strt=big_df['datetime'].min()[0:8]
end=big_df['datetime'].max()[0:8]
out_name = strt+'_'+end
big_df.to_csv('..\\data\\interim\\IN\\cleaned\\combinedtimeseries\\'+type_+'\\'+location+'_'+out_name+'.csv', index=False)
#big_df.date=pd.to_datetime(big_df.datetime, dayfirst=True, format='%d%m%Y %Hh%M')
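# Example usage (arguments are hypothetical): clean_calculated_in('seawater', 'uway')
# combines every calculated underway seawater report file into one cleaned time series csv.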
def clean_inverted(inpath, nbins, outpath):
'''
Accepts inverted scanotron data files from a specified given folder. Appends them into one dataframe and
sends them out to /interim/scanotron/combinedtimeseries/ folder. Also returns the completed dataframe as a variable for immediate use, as well as the file name and dLogDp value.
Parameters
------------
inpath : str
Path to inverted scanotron data files. [example: '..\\data\\interim\\"+instr+"\\inverted\\pro\\BHS\\']
nbins : int
Number of diameter bins for scanotron.
outpath : str
Desired location for the combined time series csv file. [example: '..\\data\\interim\\'+instr+'\\combinedtimeseries\\BHS\\']
Returns
------------
dfBig
Dataframe of combined time series of scanotron data. Rows are timestring and columns include time, diameters, and year, month, day, hour, minute, second, pex, tex, rhsh, tgrad, nb, dbeg, dend, conctotal.
outName
Name of the file that is saved to the computer.
dLogDp
Integer of dLogDp.
Notes
------------
inverted scanotron input data folder path: ..\\data\\interim\\"+instr+"\\inverted\\pro\\BHS\\
calculated output file: ..\\data\\interim\\'+instr+'\\combinedtimeseries\\BHS\\[FILE]
'''
dfBig=pd.DataFrame()
path=inpath
# Read all the files in the folder defined by inpath variable.
for file in os.listdir(path):
if file.endswith('.csv'):
df = pd.read_csv(path+file, skiprows=5,header=None,sep='\t')
dfBig = dfBig.append(df)
# Read in column names
columns = pd.read_csv(path+file, skiprows=3,nrows=0,sep='\t').columns.tolist()
# Remove all bad chars from column names
for name in range(len(columns)):
columns[name]=(columns[name]).strip()
columns[name]=(columns[name]).replace("#","")
columns[name]=columns[name].lower()
# Count number of missing column names (these are due to the size bins of the data)
num_missing_cols=nbins
# Calculate and add in new column names based on the size of the bins
start_bin = df.iloc[1,11] # Smallest Dp
end_bin = df.iloc[1,12] # Largest Dp
dLogDp=(math.log10(end_bin)-math.log10(start_bin))/(num_missing_cols-1)
num = math.log10(start_bin)
LogDp = [math.log10(start_bin)]
while num < math.log10(end_bin):
num = num + dLogDp
LogDp.append(num)
Dp = [10**j for j in LogDp]
Dp=[round(x) for x in Dp]
columns.remove('conc...')
dfBig.columns = columns + Dp
# Create datetimes
dfBig=dfBig.rename(columns={'yr':'year','mo':'month','dy':'day','hr':'hour','mn':'minute','sc':'second'})
dfBig['time']=pd.to_datetime(dfBig[['year', 'month', 'day', 'hour','minute','second']])
dfBig['timeString'] = dfBig['time'].dt.strftime('%Y-%m-%d %H:%M:%S')
# Set df index as datetime for easy parsing
dfBig=dfBig.set_index(['timeString'])
# Save as csv
#dfBig.to_csv(outPath+'out.csv')
strt=dfBig.index[0]
end=dfBig.index[-1]
outName = strt[0:10]+'_'+end[0:10]
dfBig.to_csv(outpath+outName+'.csv')
return dfBig, outName, dLogDp
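# Example usage (paths are assumptions following the docstring):
# dfBig, outName, dLogDp = clean_inverted(
#     '..\\data\\interim\\scanotron\\inverted\\pro\\BHS\\', 26,
#     '..\\data\\interim\\scanotron\\combinedtimeseries\\BHS\\')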
def clean_magic(inpath, outpath, timezone):
'''
    Loads all raw magic CPC data files, cleans them up, and appends them into one file.
    Returns the cleaned dataset to the chosen outpath as a csv file.
The steps of the cleaning process are as follows:
1) open all data files in the data/raw folder path
2) append all data files into one df
3) remove bad chars in column names
    4) localize timestamps to the given timezone and create a timeString column
    5) save df to csv in specified folder
    Parameters
    ------------
    inpath : str
        location where raw csv files are found.
    outpath : str
        location where the cleaned csv file is saved.
    timezone : str
        timezone used to localize the time index.
Returns
------------
dfBig : df
the df that was just saved to a folder
outName : str
string of the start and end datetimes
'''
dfBig=pd.DataFrame()
path= inpath
#Read in all the files in a given folder.
for file in os.listdir(path):
if file.endswith('.csv'):
df = pd.read_csv(path+file, sep='\t', parse_dates=['# UTC '], skiprows=3)
dfBig = dfBig.append(df)
# Read in column names
columns = pd.read_csv(path+file,skiprows=3,nrows=0,sep='\t').columns.tolist()
# Remove all bad chars from column names
for name in range(len(columns)):
columns[name]=(columns[name]).strip()
columns[name]=(columns[name]).replace("#","")
columns[name]=(columns[name]).replace(" ","")
columns[name]=columns[name].lower()
columns[name]=(columns[name]).replace("utc","time")
dfBig.columns = columns
dfBig.time=dfBig.time+DateOffset(hours=1)
dfBig.set_index('time',inplace=True)
dfBig=dfBig.tz_localize(None)
dfBig=dfBig.tz_localize(timezone)
dfBig.reset_index(inplace=True)
dfBig['timeString'] = dfBig['time'].dt.strftime('%Y-%m-%d (%H:%M:%S)')
dfBig=dfBig.set_index(['timeString'])
strt=dfBig.index[0]
end=dfBig.index[-1]
outName = strt[0:10]+'_'+end[0:10]
dfBig.to_csv(outpath+outName+'.csv')
return dfBig, outName
def load_scano_data(date_string, instrument):
'''
Loads interim scanotron data that has already been pre-processed using the clean_inverted function. Returns two dataframes of dN and dNdLogDp where rows are time and columns are diameters.
Parameters
------------
date_string : str
Dates of scanotron data that are requested. [YYYY-MM-DD_YYYY_MM_DD]
instrument : str
Instrument that is being loaded. [scanotron]
Returns
------------
dN
A dataframe of particle counts where rows are dates and columns are diameters.
dNdLogDp
A dataframe of log-normalized particle counts where rows are dates and columns are diameters.
Notes
------------
raw input data: \\[PROJECT_ROOT]\\data\\interim\\scanotron\\combinedtimeseries\\[FILE]
'''
# Read in the cleaned and combinedtimeseries scanotron data
df=pd.read_csv(
'..\\data\\interim\\'+instrument+'\\combinedtimeseries\\'+date_string+'.csv',
parse_dates=['time'])
# Create a dNdlogDp dataframe
dNdLogDp_full = df.set_index('time').loc[:,'10':]
dLogDp=(math.log10(df.dend[1])-math.log10(df.dbeg[1]))/(df.nb[1]-1)
# Create a dN dataframe
dN=df.set_index('time').loc[:,'10':]*dLogDp
return dN, dNdLogDp_full
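# Example usage (the date string is hypothetical):
# dN, dNdLogDp_full = load_scano_data('2020-03-18_2020-03-26', 'scanotron')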
def surface_area(smps_daily_mean_df, smps_daily_std_df, nbins):
    '''
    Calculates aerosol surface area distributions from mean and standard deviation
    scanotron size distribution data, where rows are particle diameter (Dp) bins in nm
    and columns are time spans (e.g. days).
    Returns the normalized surface area distribution (dAdLogDp), total surface area per
    volume of air (dA_total, um^2/cm^3), total particle counts (dN_total), and the
    standard deviation of the normalized surface area distribution (dAdLogDp_std).
    '''
# Calculate log of particle diameter bin size (dLogDp)
start_bin = smps_daily_mean_df.index[0] # Smallest Dp
end_bin = smps_daily_mean_df.index[-1] # Largest Dp
dLogDp=(math.log10(end_bin)-math.log10(start_bin))/(nbins-1)
# Convert particle diameters to meters from nm
smps_daily_mean_df.reset_index(inplace=True)
smps_daily_mean_df['Dp (m)']=smps_daily_mean_df['Dp']*1e-9
smps_daily_std_df.reset_index(inplace=True)
smps_daily_std_df['Dp (m)'] = smps_daily_std_df['Dp']*1e-9
# Create particle diameter squared (Dp2) in units of meters^2
smps_daily_mean_df['Dp2 (m2)'] = smps_daily_mean_df['Dp (m)']**2
smps_daily_std_df['Dp2 (m2)'] = smps_daily_std_df['Dp (m)']**2
# Calculate factor A for particle surface area calculation in m^2, which is just diameter squared times Pi
smps_daily_mean_df['factorA'] = smps_daily_mean_df['Dp2 (m2)']*3.14159
smps_daily_mean_df.set_index('Dp',inplace=True)
smps_daily_std_df['factorA'] = smps_daily_std_df['Dp2 (m2)']*3.14159
smps_daily_std_df.set_index('Dp', inplace=True)
# Calculate particles/cm^3 of air (un-normalized) by multiplying through by the normalizing constant (log of particle diameter bin size)
dN=smps_daily_mean_df.iloc[:,:-3]*dLogDp
dN_std = smps_daily_std_df.iloc[:,:-3]*dLogDp
# Calculate surface area per unit volume of air (m^2/cm^3)
dA=dN.mul(smps_daily_mean_df['factorA'],0)
dA_std=dN_std.mul(smps_daily_std_df['factorA'],0)
# Convert surface area units from m^2/cm^3 to um^2/cm^3
dA=dA*1e12
dA_std=dA_std*1e12
# Normalize by log of particle diameter bin size (dLogDp) for surface area distribution (nm^2/cm^3)
dAdLogDp = (dA*1e6)/dLogDp
dAdLogDp_std = (dA_std*1e6)/dLogDp
# Sum all size bins to calculate total surface area (um^2/cm^3)
dA_total=dA.sum(axis=0).to_frame()
dA_total.rename(columns={0:'SA'},inplace=True)
dN_total=dN.sum(axis=0).to_frame()
dN_total.rename(columns={0:'DN'},inplace=True)
return dAdLogDp, dA_total, dN_total, dAdLogDp_std
def plot_number_dist(smps_daily_mean_df, smps_daily_std_df):
'''
Creates a plot of scanotron data.
Parameters
------------
smps_daily_mean_df : pandas dataframe
Size distribution data where rows are size bins and columns are the mean of daily (or other timespan) data.
smps_daily_std_df : pandas dataframe
        Size distribution standard deviation data where rows are size bins and columns are the standard deviation of daily (or other timespan) data.
Returns
------------
fig : object
A plotly graph object.
'''
    # create melted dataframes of the mean and std data for plotting
smps_daily_mean_melt = pd.melt(smps_daily_mean_df.reset_index(),id_vars=['Dp'], value_name='counts', var_name='sample')
smps_daily_std_melt = pd.melt(smps_daily_std_df.reset_index(),id_vars=['Dp'], value_name='counts', var_name='sample')
smps_daily_mean_melt['sample'] = smps_daily_mean_melt['sample'].astype(str)
smps_daily_std_melt['sample'] = smps_daily_std_melt['sample'].astype(str)
tick_width=1
tick_loc='inside'
line_color='black'
line_width=1
xticks=7
x_tick_angle=45
x_title_size=14
x_tick_font_size=12
x_tick_font_color='black'
x_title_color='black'
yticks=7
y_title_size=16
y_tick_font_size=14
y_title_color="black"
y_tick_font_color = "black"
fig=px.line(smps_daily_mean_melt, x='Dp', y='counts', color='sample', facet_col='sample',
facet_col_wrap=5, error_y=smps_daily_std_melt['counts'])
fig.update_xaxes(type='log', title='D<sub>p</sub> (nm)')
fig.update_yaxes(type='log', title='dN/dLogD<sub>p</sub> (particles/cm<sup>3</sup>)')
fig.update_yaxes(range=[1,3.8],titlefont=dict(size=y_title_size, color=y_title_color),
ticks=tick_loc, nticks=yticks, tickwidth=tick_width,
tickfont=dict(size=y_tick_font_size, color=y_tick_font_color),
showline=True, linecolor=line_color, linewidth=line_width)
fig.update_xaxes(showticklabels=True,ticks=tick_loc, nticks=xticks, tickwidth=tick_width, showline=True,
linecolor=line_color,linewidth=line_width,
titlefont=dict(size=x_title_size, color=x_title_color),
tickfont=dict(size=x_tick_font_size, color=x_tick_font_color))
#for anno in fig['layout']['annotations']:
# anno['text']=anno.text.split('=')[1]
#for axis in fig.layout:
# if type(fig.layout[axis]) == go.layout.YAxis and fig.layout[axis].anchor not in ['x','x16', 'x21', 'x11', 'x6']:
# fig.layout[axis].title.text = ''
fig.update_traces(mode='markers+lines')
fig.update_layout(showlegend=False, width=1100, height=1300, template='plotly_white')
return fig
#fig.show(renderer="jpg")
#fig.write_image("manuscripts\\IN\\FIGURES\\figS4.png")
def plot_surface_dist(dAdLogDp, dAdLogDp_std):
dAdLogDpMelt=dAdLogDp.reset_index().melt(id_vars='Dp')
dAdLogDpMelt['variable']=dAdLogDpMelt['variable'].astype(str)
dAdLogDpMelt_std = dAdLogDp_std.reset_index().melt(id_vars='Dp')
dAdLogDpMelt_std['variable'] = dAdLogDpMelt_std['variable'].astype(str)
dAdLogDpMelt['value_2'] = dAdLogDpMelt['value'] * 1e-6
dAdLogDpMelt_std['value_2'] = dAdLogDpMelt_std['value'] * 1e-6
tick_width=1
tick_loc='inside'
line_color='black'
line_width=1
xticks=7
x_tick_angle=45
x_title_size=14
x_tick_font_size=12
x_tick_font_color='black'
x_title_color='black'
yticks=7
y_title_size=16
y_tick_font_size=14
y_title_color="black"
y_tick_font_color = "black"
fig=px.line(dAdLogDpMelt, x='Dp', y='value_2', color='variable',facet_col='variable', facet_col_wrap=5,
error_y=dAdLogDpMelt_std['value_2'])
fig.update_xaxes(type='log', title='Dp (nm)')
fig.update_yaxes(title='dA/dLogDp (\u03BCm<sup>2</sup>/cm<sup>3</sup>)', type='log')
fig.update_yaxes(range=[-1,3],titlefont=dict(size=y_title_size, color=y_title_color),
ticks=tick_loc, nticks=yticks, tickwidth=tick_width,
tickfont=dict(size=y_tick_font_size, color=y_tick_font_color),
showline=True, linecolor=line_color, linewidth=line_width)
fig.update_xaxes(showticklabels=True,ticks=tick_loc, nticks=xticks, tickwidth=tick_width, showline=True,
linecolor=line_color,linewidth=line_width,
titlefont=dict(size=x_title_size, color=x_title_color),
tickfont=dict(size=x_tick_font_size, color=x_tick_font_color))
for anno in fig['layout']['annotations']:
anno['text']=anno.text.split('=')[1]
for axis in fig.layout:
if type(fig.layout[axis]) == go.layout.YAxis and fig.layout[axis].anchor not in ['x','x16', 'x21', 'x11', 'x6']:
fig.layout[axis].title.text = ''
fig.update_traces(mode='markers+lines')
fig.update_layout(showlegend=False, width=1100, height=1300, template='plotly_white')
return fig
#fig.write_image("manuscripts\\IN\\FIGURES\\response_figs\\figS4.png", scale=4)
def wilsonLower(p, n=26, z = 1.96):
'''
    p is the frozen fraction
    n is the number of tubes
    z is the z-score for the desired confidence level (1.96 ~ 95%)
'''
try:
denominator = 1 + z**2/n
centre_adjusted_probability = p + z*z / (2*n)
adjusted_standard_deviation = math.sqrt((p*(1 - p) + z*z / (4*n)) / n)
lower_bound = (centre_adjusted_probability - z*adjusted_standard_deviation) / denominator
upper_bound = (centre_adjusted_probability + z*adjusted_standard_deviation) / denominator
return(lower_bound)
except ValueError:
return(0)
def wilsonUpper(p, n=26, z = 1.96):
try:
denominator = 1 + z**2/n
centre_adjusted_probability = p + z*z / (2*n)
adjusted_standard_deviation = math.sqrt((p*(1 - p) + z*z / (4*n)) / n)
lower_bound = (centre_adjusted_probability - z*adjusted_standard_deviation) / denominator
upper_bound = (centre_adjusted_probability + z*adjusted_standard_deviation) / denominator
return(upper_bound)
except ValueError:
return(0)
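# Worked example (hypothetical values): for a frozen fraction p = 0.5 with the
# default n = 26 tubes and z = 1.96, the Wilson score interval gives roughly
# wilsonLower(0.5) ~= 0.32 and wilsonUpper(0.5) ~= 0.68.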
def calculate_wilson_errors(project, location, type_, n = 26):
'''
Takes calculated report files and creates a csv of error bars. The csv is saved in the same location as the cleaned combined time series data file.
    Lower and upper bounds for the blank-subtracted frozen fraction of tubes are calculated using the subfunctions wilsonLower and wilsonUpper.
These fractions are then converted to a number of blank subtracted tubes that are frozen (upper_N-BLNK, lower_N-BLNK, respectively).
These bounds are then converted into INP/tube upper and lower bounds. Then they are converted to IN/mL and IN/L upper and lower bounds.
Finally, the difference between each bound and the original observed value is calculated to determine the size of the error bars.
Parameters
------------
project : str
The project. This needs to be defined since projects before Sea2Cloud were saved in different formats. [me3, nz2020]
location : str
Where sample was collected. [uway, ASIT, wkbtsml, wkbtssw, bubbler, coriolis]
    type_ : str
        The sample type. [seawater, aerosol]
n : int
Number of tubes.
Notes
-----
raw input data: \\[PROJECT_ROOT]\\data\\interim\\IN\\calculated\\[SAMPLE_TYPE]\\[SAMPLE_LOCATION]\\[FILE]
cleaned output file: \\[PROJECT_ROOT]\\data\\interim\\IN\\cleaned\\combinedtimeseries\\[SAMPLE_TYPE]\\[FILE]
'''
error_all = pd.DataFrame()
if project == 'me3':
for proc in ['FILTERED','UNFILTERED']:
for file in os.listdir("..\\data\\interim\\IN\\calculated\\"+type_+"\\"+proc+'\\'):
if file.endswith('.xls'):
error=pd.DataFrame()
singleFile= pd.read_excel('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+proc+'\\'+file, sheet_name='summary', header=5)
singleFile=singleFile.loc[:,'T (*C)':]
singleFile['lowerBound']=singleFile['FrozenFraction'].apply(wilsonLower)
singleFile['upperBound']=singleFile['FrozenFraction'].apply(wilsonUpper)
singleFile['upper_N-BLNK']=singleFile['upperBound']*n
singleFile['lower_N-BLNK']=singleFile['lowerBound']*n
singleFile['IN/tube_upper']=numpy.log(n)-numpy.log(n-singleFile['upper_N-BLNK'])
singleFile['IN/tube_lower']=numpy.log(n)-numpy.log(n-singleFile['lower_N-BLNK'])
singleFile['IN/ml_upper']=singleFile['IN/tube_upper']/.2
singleFile['IN/ml_lower']=singleFile['IN/tube_lower']/.2
singleFile['IN/L_upper']=singleFile['IN/ml_upper']*1000
singleFile['IN/L_lower']=singleFile['IN/ml_lower']*1000
error['IN/L_lower']=singleFile['IN/L_lower']
error['IN/L_upper']=singleFile['IN/L_upper']
error['error_y'] = singleFile['IN/L_upper'] - singleFile['IN/L']
error['error_minus_y'] = abs(singleFile['IN/L_lower'] - singleFile['IN/L'])
error.index = singleFile['T (*C)'].round(1)
error = error.T
error.index.rename('value_name', inplace=True)
error.reset_index(inplace=True)
name = file.split('-')
bag = name[2][1]
day = name[3]
day = day.replace('D','')
day = day.replace('.xls','')
day = int(day)
error['day'] = day
error['bag'] = bag
error['filtered/unfiltered'] = proc
error_all = pd.concat([error_all, error])
error_all.set_index(['day','bag','value_name','filtered/unfiltered'],inplace=True)
error_all.to_csv('C:\\Users\\trueblood\\projects\\me3\\data\\interim\\IN\\cleaned\\seawater\\IN_error_wilson.csv')
else:
for file in os.listdir("..\\data\\interim\\IN\\calculated\\"+type_+"\\"+location+'\\'):
if file.endswith('.xlsx'):
for proc in ['UH','H']:
if type_ == 'seawater':
error=pd.DataFrame()
singleFile= pd.read_excel('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+file, sheet_name='summary_UF_'+proc, header=0)
singleFile['lowerBound']=singleFile['FrozenFraction'].apply(wilsonLower)
singleFile['upperBound']=singleFile['FrozenFraction'].apply(wilsonUpper)
singleFile['upper_N-BLNK']=singleFile['upperBound']*n
singleFile['lower_N-BLNK']=singleFile['lowerBound']*n
singleFile['IN/tube_upper']=numpy.log(n)-numpy.log(n-singleFile['upper_N-BLNK'])
singleFile['IN/tube_lower']=numpy.log(n)-numpy.log(n-singleFile['lower_N-BLNK'])
singleFile['IN/ml_upper']=singleFile['IN/tube_upper']/.2
singleFile['IN/ml_lower']=singleFile['IN/tube_lower']/.2
singleFile['IN/L_upper']=singleFile['IN/ml_upper']*1000
singleFile['IN/L_lower']=singleFile['IN/ml_lower']*1000
error['IN/L_lower']=singleFile['IN/L_lower']
error['IN/L_upper']=singleFile['IN/L_upper']
error['error_y'] = singleFile['IN/L_upper'] - singleFile['IN/L']
error['error_minus_y'] = abs(singleFile['IN/L_lower'] - singleFile['IN/L'])
error.index = singleFile['T (*C)'].round(1)
error = error.T
error.index.rename('value_name', inplace=True)
error.reset_index(inplace=True)
name = file.split('_')
error['type'] = name[0]
error['location']=name[1]
error['filtered']=name[2]
error['date']=name[3]
error['time']=name[4][0:2]+'h'+name[4][2:4]
error['datetime_str'] = error.date.astype(str)+' '+error.time
error['datetime'] = pd.to_datetime(error['datetime_str'], format='%d%m%y %Hh%M')
error['process'] = proc
elif type_ == 'aerosol':
error=pd.DataFrame()
singleFile= pd.read_excel('..\\data\\interim\\IN\\calculated\\'+type_+'\\'+location+'\\'+file, sheet_name='summary_UF_'+proc, header=0)
error['IN/L_lower']=singleFile['lower INP/L']
error['IN/L_upper']=singleFile['upper INP/L']
error['error_y'] = error['IN/L_upper'] - singleFile['IN/L']
error['error_minus_y'] = abs(error['IN/L_lower'] - singleFile['IN/L'])
error.index = singleFile['T (*C)'].round(1)
error = error.T
error.index.rename('value_name', inplace=True)
error.reset_index(inplace=True)
name = file.split('_')
error['type'] = name[0]
error['location']=name[1]
error['filtered']=name[2]
error['size'] = name[3]
error['date']=name[4]
error['time']=name[5][0:2]+'h'+name[5][2:4]
error['datetime_str'] = error.date.astype(str)+' '+error.time
error['datetime'] = pd.to_datetime(error['datetime_str'], format='%d%m%y %Hh%M')
error['process'] = proc
error_all = pd.concat([error_all, error])
strt=error_all['date'].min()[0:8]
end=error_all['date'].max()[0:8]
out_name = strt+'_'+end
error_all = error_all.drop(columns='datetime_str')
error_all = error_all.drop(columns='date')
error_all.set_index(['type','location','filtered','value_name','time','process'],inplace=True)
error_all.to_csv('..\\data\\interim\\IN\\cleaned\\combinedtimeseries\\'+type_+'\\'+location+'_'+out_name+'wilson_error.csv')
def load_wilson_errors():
'''
See IN_Analysis_V1.ipynb. This will load and clean up the spreadsheets of error bars.
'''
pd.read_csv("C:\\Users\\trueblood\\projects\\me3\\data\\interim\\IN\\cleaned\\seawater\\IN_error_wilson.csv", sep=',', index_col=[0,1,2])
errors_melt = pd.melt(errors.reset_index(), id_vars=['bag','day', 'value_name'], value_name='error', var_name='T')
def plot_sml_inp(inp_df):
'''
This accepts a dataframe of INP values along with their uncertainties and plots heated vs unheated as well as literature values.
'''
tick_width=1
tick_loc='inside'
line_color='black'
line_width=1
xticks=10
x_tick_angle=45
x_title_size=15
x_tick_font_size=14
x_tick_font_color='black'
x_title_color='black'
y_title_size=15
y_tick_font_size=10
y_title_color="black"
y_tick_font_color = "black"
fig = go.Figure()
fig.add_trace(go.Scatter(mode='markers',
x = inp_df[inp_df['process']=='UH']['temp'], y = inp_df[inp_df['process']=='UH']['inp/l'], name = 'Unheated SML',
error_y=dict(
type='data',
symmetric=False,
array=inp_df[inp_df['process']=='UH']['error_y'],
arrayminus=inp_df[inp_df['process']=='UH']['error_minus_y'],
thickness=1.5,
width=5)
))
fig.add_trace(go.Scatter(mode='markers',
x = inp_df[inp_df['process']=='H']['temp'], y = inp_df[inp_df['process']=='H']['inp/l'], name = 'Heated SML',
error_y=dict(
type='data',
symmetric=False,
array=inp_df[inp_df['process']=='H']['error_y'],
arrayminus=inp_df[inp_df['process']=='H']['error_minus_y'],
thickness=1.5,
width=5)
))
fig.add_trace(go.Scatter(mode='lines',x = [-15,-20,-25,-22, -17, -15, -15], y = [0.6*10**3,0.6*10**3,60*10**3,60*10**3, 2*10**3, 2*10**3, 0.6*10**3], name = 'McCluskey et al. (2018) (Southern Ocean)'))
fig.add_trace(go.Scatter(mode='lines',x = [-5,-16,-25,-10, -5], y = [3*10**4,3*10**4,4*10**6,4*10**6, 3*10**4], name = 'Wilson et al. (2015)'))
fig.add_trace(go.Scatter(mode='lines', x = [-10,-22,-28,-22, -15, -10], y = [3*10**4,3*10**4,1*10**7,1*10**7, 1*10**6, 3*10**4], name = 'Irish et al. (2017)'))
fig.add_trace(go.Scatter(mode='lines', x = [-5,-13,-16,-17, -11, -5], y = [2*10**4,2*10**4,2*10**5,4*10**6, 4*10**6, 2*10**4], name = 'Irish et al. (2019)'))
fig.add_trace(go.Scatter(mode='lines', x = [-9,-15,-16,-27, -24, -9], y = [1.8*10**2,1.8*10**2,2*10**3,4*10**6, 4*10**6, 1.8*10**2], name = 'Gong et al. (2020)'))
fig.add_trace(go.Scatter(mode='lines', x = [-10,-15,-16.5,-16.5, -15, -10], y = [1.9*10**2,1.9*10**2,9*10**2,2*10**4, 2*10**4, 1.9*10**2], name = 'Trueblood et al. (2020) - Microlayer'))
fig.update_yaxes(exponentformat='power', type='log')
fig.update_yaxes(nticks=20, range=[1.3,5], title="INP/L",
titlefont=dict(size=y_title_size, color=y_title_color),
ticks=tick_loc, tickwidth=tick_width, tickfont=dict(size=y_tick_font_size, color=y_tick_font_color),
showline=True, linecolor=line_color, linewidth=line_width)
fig.update_xaxes(title='Temperature (\u00B0C)', range=[-20,-3], ticks=tick_loc, nticks=xticks, tickwidth=tick_width,
showline=True, linecolor=line_color, linewidth=line_width,
tickangle=x_tick_angle, tickfont=dict(size=x_tick_font_size, color=x_tick_font_color),
title_font=dict(size=x_title_size, color=x_title_color),
)
    fig.update_traces(marker=dict(size=5, line=dict(width=.5, color='black')))
fig.update_layout(template='plotly_white', height=600, width=700, showlegend=True,
legend=dict(title='',font=dict(size=14, color='black'), x=0.62, y=.5, bordercolor="Black", borderwidth=1))
return fig
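# A minimal usage sketch (assumes inp_df carries the columns referenced above:
# 'process', 'temp', 'inp/l', 'error_y', and 'error_minus_y'):
# fig = plot_sml_inp(inp_df)
# fig.show()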
def plot_ssw_inp(inp_df):
'''
This accepts a dataframe of INP values along with their uncertainties and plots heated vs unheated as well as literature values.
'''
tick_width=1
tick_loc='inside'
line_color='black'
line_width=1
xticks=10
x_tick_angle=45
x_title_size=15
x_tick_font_size=14
x_tick_font_color='black'
x_title_color='black'
y_title_size=15
y_tick_font_size=10
y_title_color="black"
y_tick_font_color = "black"
fig = go.Figure()
fig.add_trace(go.Scatter(mode='markers',
x = inp_df[inp_df['process']=='UH']['temp'], y = inp_df[inp_df['process']=='UH']['inp/l'], name = 'Unheated SSW',
error_y=dict(
type='data',
symmetric=False,
array=inp_df[inp_df['process']=='UH']['error_y'],
arrayminus=inp_df[inp_df['process']=='UH']['error_minus_y'],
thickness=1.5,
width=5)
))
fig.add_trace(go.Scatter(mode='markers',
x = inp_df[inp_df['process']=='H']['temp'], y = inp_df[inp_df['process']=='H']['inp/l'], name = 'Heated SSW',
error_y=dict(
type='data',
symmetric=False,
array=inp_df[inp_df['process']=='H']['error_y'],
arrayminus=inp_df[inp_df['process']=='H']['error_minus_y'],
thickness=1.5,
width=5)
))
fig.add_trace(go.Scatter(mode='lines',x = [-15,-20,-25,-22, -17, -15, -15], y = [0.6*10**3,0.6*10**3,60*10**3,60*10**3, 2*10**3, 2*10**3, 0.6*10**3], name = 'McCluskey et al. (2018) (Southern Ocean)'))
fig.add_trace(go.Scatter(mode='lines',x = [-5,-16,-25,-10, -5], y = [3*10**4,3*10**4,4*10**6,4*10**6, 3*10**4], name = 'Wilson et al. (2015)'))
fig.add_trace(go.Scatter(mode='lines', x = [-10,-22,-28,-22, -15, -10], y = [3*10**4,3*10**4,1*10**7,1*10**7, 1*10**6, 3*10**4], name = 'Irish et al. (2017)'))
fig.add_trace(go.Scatter(mode='lines', x = [-5,-13,-16,-17, -11, -5], y = [2*10**4,2*10**4,2*10**5,4*10**6, 4*10**6, 2*10**4], name = 'Irish et al. (2019)'))
fig.add_trace(go.Scatter(mode='lines', x = [-9,-15,-16,-27, -24, -9], y = [1.8*10**2,1.8*10**2,2*10**3,4*10**6, 4*10**6, 1.8*10**2], name = 'Gong et al. (2020)'))
fig.add_trace(go.Scatter(mode='lines', x = [-13,-15,-16.5,-16.5, -14, -13], y = [1.9*10**2,1.9*10**2,4*10**2,4*10**3, 2*10**3, 1.9*10**2], name = 'Trueblood et al. (2020) - Seawater'))
fig.update_yaxes(exponentformat='power', type='log')
fig.update_yaxes(nticks=20, range=[1.3,5], title="INP/L",
titlefont=dict(size=y_title_size, color=y_title_color),
ticks=tick_loc, tickwidth=tick_width, tickfont=dict(size=y_tick_font_size, color=y_tick_font_color),
showline=True, linecolor=line_color, linewidth=line_width)
fig.update_xaxes(title='Temperature (\u00B0C)', range=[-20,-3], ticks=tick_loc, nticks=xticks, tickwidth=tick_width,
showline=True, linecolor=line_color, linewidth=line_width,
tickangle=x_tick_angle, tickfont=dict(size=x_tick_font_size, color=x_tick_font_color),
title_font=dict(size=x_title_size, color=x_title_color),
)
    fig.update_traces(marker=dict(size=5, line=dict(width=.5, color='black')))
fig.update_layout(template='plotly_white', height=600, width=700, showlegend=True,
legend=dict(title='',font=dict(size=14, color='black'), x=0.62, y=.5, bordercolor="Black", borderwidth=1))
return fig
def clean_aqualog(instr, outpath):
'''
    Loads all raw aqualog data files for a given instrument (aqlog1 or aqlog2) and cleans them up.
    Saves the cleaned dataset to the chosen outpath.
    The steps of the cleaning process are as follows:
    1) open all data files in the data/raw folder path
2) append all data files into one df
3) remove bad chars in column names
4) create a timeString column in UTC time
5) save df to csv in specified folder
Parameters
----------
instr : string
aqualog1 or aqualog2
outpath : string
location where cleaned csv file is saved.
Returns
-------
dfBig : df
the df that was just saved to a folder
outName : string
string of the start and end datetimes
'''
dfBig=pd.DataFrame()
path='C:\\Users\\trueblood\\projects\\nz2020\\New-Zealand-2020\\data\\raw\\'+instr+'\\'
#Read in all the files in a given folder.
for file in os.listdir(path):
if file.endswith('.csv'):
df = pd.read_csv(path+file, sep='\t', parse_dates=['# UTC ISO8601'])
            dfBig = pd.concat([dfBig, df])  # DataFrame.append was removed in pandas 2.0
# Read in column names
columns = pd.read_csv(path+file,nrows=0,sep='\t').columns.tolist()
# Remove all bad chars from column names
for name in range(len(columns)):
columns[name]=(columns[name]).strip()
columns[name]=(columns[name]).replace("#","")
columns[name]=(columns[name]).replace(" ","")
columns[name]=columns[name].lower()
columns[name]=(columns[name]).replace("utciso8601","time")
dfBig.columns = columns
dfBig['timeString'] = dfBig['time'].dt.strftime('%Y-%m-%d %H:%M:%S')
dfBig=dfBig.set_index(['timeString'])
dfBig.time=dfBig.time.dt.tz_localize(None)
strt=dfBig.index[0]
end=dfBig.index[-1]
outName = strt[0:10]+'_'+end[0:10]
dfBig.to_csv(outpath+outName+'.csv')
return dfBig, outName
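# A minimal usage sketch (the instrument name and outpath are assumptions):
# dfBig, outName = clean_aqualog('aqlog1', '..\\data\\interim\\aqualog\\')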
``` |
{
"source": "jotslo/dolphin-auto-upscale",
"score": 2
} |
#### File: dolphin-auto-upscale/src/main.py
```python
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from os import system, path, makedirs
from modules import data
from time import sleep
load_path = path.expanduser(f"{data.dolphin_path}\\Load\\Textures")
dump_path = path.expanduser(f"{data.dolphin_path}\\Dump\\Textures")
script_path = "\\".join(__file__.split("\\")[:-1])
input(data.startup_message)
system("cls")
print(data.started_message)
class MonitorFolder(FileSystemEventHandler):
def get_load_path(self, file_path):
game_title = file_path.split("\\")[-2]
file_name = file_path.split("\\")[-1]
new_directory = f"{load_path}\\{game_title}"
new_file_path = f"{new_directory}\\{file_name}"
        makedirs(new_directory, exist_ok=True)
return new_file_path
def on_created(self, event):
file_path = event.src_path
whitelisted = file_path.split(".")[-1] in data.extension_whitelist
blacklisted = any(name in file_path.split("\\")[-1]
for name in data.name_contains_blacklist)
if whitelisted and not blacklisted:
new_file_path = self.get_load_path(file_path)
system(data.upscale_command.format(
script_path, file_path, new_file_path, data.upscale_factor))
observer = Observer()
observer.schedule(MonitorFolder(), dump_path, recursive=True)
observer.start()
try:
while True:
sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
``` |
{
"source": "jotson/godot-build",
"score": 2
} |
#### File: jotson/godot-build/version.py
```python
import subprocess
import configparser
import config
def get_version():
cfg = configparser.ConfigParser()
cfg.read(config.VERSION_FILE)
return int(cfg['version']['version'])
def get_status():
cfg = configparser.ConfigParser()
cfg.read(config.VERSION_FILE)
status = cfg['version']['status']
status = status.replace('"', '') # Godot does strings differently
return status
def get_commit():
cfg = configparser.ConfigParser()
cfg.read(config.VERSION_FILE)
commit = cfg['version']['commit']
commit = commit.replace('"', '') # Godot does strings differently
return commit
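# For reference, the getters above assume a VERSION_FILE laid out roughly like
# this (contents are hypothetical):
#
# [version]
# version = 12
# status = "beta"
# commit = "abc1234"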
def get_public_version():
version = get_version()
status = get_status()
return '%s-%s' % (version, status)
def increment_version():
cfg = configparser.ConfigParser()
cfg.read(config.VERSION_FILE)
version = int(cfg['version']['version']) + 1
cfg['version']['version'] = str(version)
with open(config.VERSION_FILE, 'w') as configfile:
cfg.write(configfile)
def get_latest_commit():
output = subprocess.check_output(['git', 'log', '-1', '--oneline', 'HEAD'])
commit = output.split()[0].decode('utf-8')
return commit
def get_latest_commit_message():
output = subprocess.check_output(['git', 'log', '-1', '--format=%s'])
title = output.decode('utf-8').strip()
output = subprocess.check_output(['git', 'log', '-1', '--format=%b', '--shortstat'])
message = output.decode('utf-8').strip()
return (title, message)
def get_branch():
output = subprocess.check_output(['git', 'status', '--short', '--branch'])
branch = output.decode('utf-8').split('...')[0].replace('## ', '')
return branch
def set_commit(commit):
cfg = configparser.ConfigParser()
cfg.read(config.VERSION_FILE)
# Godot does strings differently
cfg['version']['commit'] = '"%s"' % commit
with open(config.VERSION_FILE, 'w') as configfile:
cfg.write(configfile)
def tag_git():
subprocess.check_output(['git', 'tag', 'v%s' % get_public_version(), get_commit() ])
def get_last_tag():
tag = subprocess.check_output(['git', 'describe', '--tags', '--abbrev=0', '@^'])
return tag.decode('utf-8').strip()
def get_commits_since_last_tag():
SEP = "~~~"
tag = get_last_tag()
output = subprocess.check_output(['git', 'log', '--format=%s%n%b' + SEP, '%s..HEAD' % tag])
output = output.decode('utf-8').strip()
output = output.replace("\n\n", "\n")
lines = output.split(SEP)
updates = ""
fixes = ""
private = ""
for line in lines:
        inner_lines = line.strip().split("\n")
line = ""
for inner in inner_lines:
if inner.startswith("- "):
inner = "; " + inner[2:]
line += inner
if len(line) == 0:
continue
if line.find("#private") >= 0:
private += "- " + line + "\n"
elif line.lower().startswith("fix"):
fixes += "- " + line + "\n"
else:
updates += "- " + line + "\n"
if len(updates) > 0:
output = "New and updated:\n\n" + updates.strip()
if len(fixes) > 0:
if len(output) > 0:
output += "\n\n"
output += "Fixes:\n\n" + fixes.strip()
if len(private) > 0:
if len(output) > 0:
output += "\n\n"
output += "Private:\n\n" + private.strip()
return output
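# For example (hypothetical commit subjects), "Fix crash on load" is grouped under
# Fixes, "Add new level" under New and updated, and "Rebalance drops #private"
# under Private.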
def generate_changelog():
ver = get_public_version()
commits = get_commits_since_last_tag()
with open(config.CHANGELOG, 'w') as changelog:
changelog.write('# v%s\n\n' %ver)
changelog.write(commits)
def get_changelog():
with open(config.CHANGELOG, 'r') as changelog:
lines = changelog.readlines()
log = ''.join(lines)
return log
``` |
{
"source": "jotson/minecraft-notify",
"score": 3
} |
#### File: jotson/minecraft-notify/minecraft-notify.py
```python
import datetime
import smtplib
from email.mime.text import MIMEText
logfiles = ['/path/to/your/server.log'] # list of log files to scan
status = '/tmp/minecraft-server-last-scan' # last scan date is stored here
email = ['<EMAIL>', '<EMAIL>', '...']
ignore = ['username-to-ignore', 'username-to-ignore']
smtp_host = 'smtp host'
smtp_port = 587
smtp_user = 'smtp server login'
smtp_password = '<PASSWORD>'
smtp_from = '<EMAIL>'
def handleEvent(description):
# Skip users in ignore list
for user in ignore:
if description.find(user) != -1:
return
# Email notification
description = description.rstrip('\n')
msg = MIMEText(description)
msg['Subject'] = "[MINECRAFT] " + description
msg['From'] = smtp_from
msg['To'] = ', '.join(email)
s = smtplib.SMTP(smtp_host, smtp_port)
s.starttls()
s.login(smtp_user, smtp_password)
s.sendmail(smtp_from, email, msg.as_string())
s.quit()
if __name__ == '__main__':
# Load lastScan datetime
lastScan = None
try:
f = open(status, 'r')
data = f.readlines()
f.close()
lastScan = datetime.datetime.strptime(data[0], '%Y-%m-%d %H:%M:%S')
    except Exception:
        # Missing or malformed status file; fall back to the current time below
        pass
if lastScan is None:
lastScan = datetime.datetime.now()
f = open(status, 'w')
f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
f.close()
for log in logfiles:
f = open(log, 'r')
data = f.readlines()
f.close()
ts = None
for line in data:
try:
(event_date, event_time, event_level, event_description) = line.split(' ', 3)
ts = datetime.datetime.strptime(event_date + ' ' + event_time, '%Y-%m-%d %H:%M:%S')
if ts > lastScan:
if event_description.find("joined the game") != -1:
handleEvent(event_description)
if event_description.find("left the game") != -1:
handleEvent(event_description)
except ValueError:
pass
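# For reference, the parser above expects server log lines shaped like this
# (hypothetical example):
# 2021-01-01 12:34:56 [INFO] Steve joined the game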
``` |
{
"source": "JottaF/ProjetosFacul",
"score": 2
} |
#### File: noticias/app_noticias/models.py
```python
from django.contrib.auth.models import User
from django.db import models
class Pessoa(models.Model):
usuario = models.OneToOneField(
User, on_delete=models.CASCADE, verbose_name='Usuário')
nome = models.CharField(verbose_name='Nome', max_length=130)
data_de_nascimento = models.DateField()
email = models.EmailField(verbose_name='E-mail', null=True, blank=True)
def __str__(self):
return self.nome
class Tag(models.Model):
nome = models.CharField(max_length=64)
slug = models.SlugField(max_length=64)
def __str__(self):
return self.nome
class Noticia(models.Model):
class Meta:
verbose_name = 'Notícia'
verbose_name_plural = 'Notícias'
autor = models.ForeignKey(Pessoa, on_delete=models.CASCADE, null=True, blank=True)
tags = models.ManyToManyField(Tag)
titulo = models.CharField(max_length=120, null=True, blank=True)
resumo = models.TextField(max_length=500, null=True, blank=True)
descricao = models.TextField(null=True, blank=True)
def __str__(self):
return self.titulo
class MensagemDeContato(models.Model):
nome = models.CharField(max_length=120)
email = models.EmailField()
mensagem = models.TextField()
data = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.email
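# A minimal usage sketch in the Django shell (names and values are hypothetical):
# pessoa = Pessoa.objects.create(usuario=user, nome='Ana', data_de_nascimento='2000-01-01')
# noticia = Noticia.objects.create(autor=pessoa, titulo='Titulo', resumo='Resumo', descricao='Texto')
# noticia.tags.add(Tag.objects.create(nome='geral', slug='geral'))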
```
#### File: noticias/app_noticias/viewss.py
```python
from django.views.generic.edit import FormView
from django.http import HttpResponse, Http404
from django.shortcuts import render
from app_noticias.models import Noticia, MensagemDeContato, Tag, Pessoa
from app_noticias.forms import ContatoForm, TagForm, AddNoticiaForm
def home_page(request):
noticias = Noticia.objects.all()
return render(request, 'app_noticia/home_page.html', {'todas_noticias': noticias})
def resumo(request):
total = Noticia.objects.count()
html = '''
<html>
<body>
<h1>Resumo</h1>
<p> A quantidade total de noticias é {} </p>
</body>
</html>
'''.format(total)
return HttpResponse(html)
def resumo_template(request):
total = Noticia.objects.count()
return render(request, 'app_noticia/resumo.html', {'total': total})
def detalhes_noticia(request, noticia_id):
if request.user.is_authenticated:
try:
noticia = Noticia.objects.get(pk=noticia_id)
tags = []
for item in noticia.tags.all():
tags.append(item.nome)
texto = ', '.join(tags)
except Noticia.DoesNotExist:
raise Http404("Noticia não encontrada")
return render(request, 'app_noticia/detalhes.html', {'noticia': noticia, 'tags_texto': texto})
else:
return render(request, 'app_noticia/erro_acesso.html', {})
def contato(request):
formulario = ContatoForm()
return render(request, 'app_noticia/contato.html', {'form': formulario})
def salvar_mensagem(request):
if request.method == 'POST':
dados = request.POST
mensagem = MensagemDeContato()
mensagem.nome = dados.get('nome')
mensagem.email = dados.get('email')
mensagem.mensagem = dados.get('mensagem')
email = dados.get('email')
if '@gmail.com' in email:
return HttpResponse('Provedor de email nao suportado')
mensagem_texto = dados.get('mensagem')
        palavras = ['defeito', 'erro', 'problema']
        for palavra in palavras:
if palavra in mensagem_texto.lower():
return HttpResponse('Palavras nao permitidas')
mensagem.save()
return render(request, 'app_noticia/contato_sucesso.html', {})
def tag(request):
if request.user.is_authenticated:
formulario = TagForm()
return render(request, 'app_noticia/tag.html', {'form': formulario})
else:
return render(request, 'app_noticia/erro_acesso.html', {})
def salvarTag(request):
tagAdd = False
if request.method == 'POST':
dados = request.POST
tag = Tag()
tag.nome = dados.get('nome')
tag.slug = dados.get('slug')
tag.save()
tagAdd = True
return render(request, 'app_noticia/tag_sucesso.html', {'tagAdd':tagAdd})
def addNoticia(request):
if request.user.is_authenticated:
formulario = AddNoticiaForm()
return render(request, 'app_noticia/adicionar-noticia.html', {'form': formulario})
else:
return render(request, 'app_noticia/erro_acesso.html', {})
def postarNoticia(request):
    tagAdd = False
    if request.method == 'POST':
        dados = request.POST
        noticia = Noticia()
        # Assumptions: the author is the Pessoa linked to the logged-in user,
        # and the form field names match the Noticia model fields
        noticia.autor = Pessoa.objects.filter(usuario=request.user).first()
        noticia.titulo = dados.get('titulo')
        noticia.resumo = dados.get('resumo')
        noticia.descricao = dados.get('descricao')
        noticia.save()
        tagAdd = True
    return render(request, 'app_noticia/tag_sucesso.html', {'tagAdd': tagAdd})
``` |
{
"source": "jottenlips/tinyauth",
"score": 2
} |
#### File: aws_resources/tests/test_dynamo.py
```python
from aws_resources.dynamo import table
def test_table():
db = table()
    assert db is not None
``` |
{
"source": "jotterbach/dstk",
"score": 3
} |
#### File: DSTK/FeatureBinning/base_binner.py
```python
import abc
import numpy as np
import warnings
class BaseBinner(object):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def splits(self):
raise NotImplementedError
@splits.setter
def splits(self, values):
raise NotImplementedError
@abc.abstractproperty
def values(self):
raise NotImplementedError
@values.setter
def values(self, values):
raise NotImplementedError
@abc.abstractmethod
def fit(self, values, targets):
raise NotImplementedError
@abc.abstractproperty
def is_fit(self):
raise NotImplementedError
@is_fit.setter
def is_fit(self, is_fit):
raise NotImplementedError
def transform(self, values, **kwargs):
"""
# See output of 'fit_transform()'
# :param feature_values:
# :param class_index:
# :return:
# """
if not self.is_fit:
raise AssertionError("FeatureBinner has to be fit to the data first.")
class_index = kwargs.get('class_index', 1)
idx = np.searchsorted(self.splits, values, side='right')
# searchsorted on nan values gives wrong idx when input is nan
# as it assumes the nan value is "right" of a nan split
idx[np.where(np.isnan(values))[0]] -= 1
if class_index:
return np.asarray(self.values)[idx][:, class_index]
else:
return np.asarray(self.values)[idx]
def fit_transform(self, feature_values, target_values, **kwargs):
"""
:param feature_values: list or array of the feature values
:param target_values: list or array of the corresponding labels
:param class_index: Index of the corresponding class in the conditional probability vector for each bucket.
Defaults to 1 (as mostly used for binary classification)
:return: list of cond_proba_buckets with corresponding conditional probabilities P( T | x in X )
for a given example with value x in bin with range X to have label T and list of conditional probabilities for each value to be of class T
"""
self.fit(feature_values, target_values)
return self.transform(feature_values, **kwargs)
def add_bin(self, right_bin_edge, bin_value):
if right_bin_edge in self.splits:
warnings.warn("Bin edge already exists.", UserWarning)
return self
# use np.searchsorted instead of digitize as the latter
# gives the wrong result for purely non-numeric splits
# see corresponding test for the issue
idx = np.searchsorted(self.splits, right_bin_edge, side='right')
self.splits = np.insert(self.splits, idx, right_bin_edge).tolist()
self.values = np.insert(self.values, [idx], bin_value, axis=0).tolist()
return self
def __str__(self):
return self.__repr__()
def __repr__(self):
sp = np.asarray(self.splits)
vals = np.asarray(self.values)
non_na_sp = sp[~np.isnan(sp)]
non_na_val = vals[~np.isnan(sp)]
na_val = vals[np.isnan(sp)].flatten()
non_na_str = ["<= {}: {}".format(split, val) for split, val in zip(non_na_sp, non_na_val)]
non_na_str += ["NaN: {}".format(na_val)]
return "\n".join(non_na_str)
```
#### File: DSTK/FeatureBinning/_utils.py
```python
from __future__ import division
import numpy as np
def _get_non_nan_values_and_targets(values, target):
feats = np.array(values)
labels = np.array(target)
return feats[~np.isnan(feats)].reshape(-1, 1), labels[~np.isnan(feats)].reshape(-1, 1)
def _process_nan_values(values, target, prior):
feats = np.array(values)
labels = np.array(target)
num_classes = np.bincount(target).size
num_nan = len(feats[np.isnan(feats)])
val_count = np.bincount(labels[np.isnan(feats)], minlength=num_classes)
if (num_nan == 0) and prior:
return [1 - prior, prior]
elif (num_nan == 0) and (prior is None):
return (np.ones((num_classes,), dtype='float64') / num_classes).tolist()
return list(val_count / num_nan)
def _filter_special_values(values, filter_values):
feats = np.array(values)
filters = np.array(filter_values)
special_vals_idx_dct = dict()
for val in filters:
if np.isnan(val):
idx = np.where(np.isnan(feats))[0]
else:
idx = np.where(feats == val)[0]
special_vals_idx_dct.update({str(val): idx})
all_special_idx = list()
for vals in special_vals_idx_dct.values():
all_special_idx += vals.tolist()
all_special_idx = np.asarray(all_special_idx)
all_idx = np.arange(0, len(feats), 1, dtype=int)
all_non_special_idx = [idx for idx in all_idx if idx not in all_special_idx]
special_vals_idx_dct.update({'regular': all_non_special_idx})
return special_vals_idx_dct
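# For example (hypothetical input), _filter_special_values([1.0, np.nan, 2.0, 1.0], [1.0, np.nan])
# returns {'1.0': array([0, 3]), 'nan': array([1]), 'regular': [2]}: one index array per
# special value plus the indices of all remaining 'regular' entries.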
def _naive_bayes_bins(target, prior, num_classes=2):
if len(target) > 1:
cnts = np.bincount(target, minlength=num_classes)
return list(cnts / cnts.sum())
else:
return [1 - prior, prior]
```
#### File: DSTK/GAM/PSplineGAM.py
```python
from __future__ import division
import scipy as sp
import numpy as np
from statsmodels import api as sm
import time
import sys
from DSTK.GAM.utils.p_splines import _get_percentiles, _get_basis_vector, R
from DSTK.utils.function_helpers import sigmoid
from DSTK.GAM.gam import ShapeFunction
from DSTK.GAM.base_gam import BaseGAM
import pandas
from datetime import datetime
from DSTK.utils import sampling_helpers as sh
def _calculate_residuals(y, mu, eta):
# return (y - mu) / mu #+ eta
if y > 0:
ratio = np.exp(np.log(y) - np.log(mu))
else:
ratio = 0.0
return ratio - 1 #+ eta
_residuals = np.frompyfunc(_calculate_residuals, 3, 1)
class PSplineGAM(BaseGAM):
"""
This class implements learning a cubic spline interpolation for a binary classification problem. The implementation
is based on the P-IRLS algorithm as described in the book:
<NAME>, Generalized Additive Models, Chapman and Hall/CRC (2006)
"""
def _get_metadata_dict(self):
return {
'serialization_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S%z')
}
def __init__(self, **kwargs):
super(PSplineGAM, self).__init__()
self.num_percentiles = kwargs.get('num_percentiles', 10)
self.tol = kwargs.get('tol', 5e-4)
self.max_iter = kwargs.get('max_iter', 1000)
self.balancer = kwargs.get('balancer', None)
self.balancer_seed = kwargs.get('balancer_seed', None)
self._knots = None
self.spline = None
self.basis_matrix = None
self.coeffs = None
self.spline = None
self.scaler = None
self._intercept = None
self._individual_feature_coeffs = None
self.scalers_ = dict()
_allowed_balancers = ['global_upsample', 'global_downsample']
if not self.balancer is None:
if not self.balancer in _allowed_balancers:
raise NotImplementedError(
"Balancing method '{}' not implemented. Choose one of {}.".format(self.balancer,
_allowed_balancers))
def _init_properties(self, data, labels):
self.n_features = data.shape[1]
if isinstance(data, pandas.core.frame.DataFrame):
self.feature_names = data.columns.tolist()
            data = data.to_numpy()  # DataFrame.as_matrix() was removed in pandas 1.0
if self.feature_names is None:
self.feature_names = ['feature_{}'.format(dim) for dim in range(self.n_features)]
if isinstance(labels, pandas.core.series.Series):
labels = labels.values
return data, labels
def train(self, data, targets, **kwargs):
penalty = kwargs.get('penalty', None)
data, labels = self._init_properties(data, targets)
assert isinstance(data, np.ndarray), 'Data is not of type numpy.ndarray'
if data.ndim == 2:
self.n_features = data.shape[1]
else:
self.n_features = 1
if self.balancer == 'global_upsample':
data, targets = sh.upsample_minority_class(data, targets, random_seed=self.balancer_seed)
elif self.balancer == 'global_downsample':
data, targets = sh.downsample_majority_class(data, targets, random_seed=self.balancer_seed)
scaled_data = self._scale_transform(data)
self._create_knots_dict(data, self.num_percentiles)
data_basis_expansion = self._flatten_basis_for_fitting(scaled_data)
self.coeffs = self._get_initial_coeffs(scaled_data.shape[0])
if penalty:
data_basis_expansion = np.vstack((data_basis_expansion, np.sqrt(penalty) * self._penalty_matrix()))
y = np.asarray(targets.tolist() + np.zeros(((self.num_percentiles + 1) * self.n_features + 1, 1)).squeeze().tolist()).flatten().squeeze()
else:
y = np.asarray(targets.tolist())
norm = 0.0
old_norm = 1.0
idx = 0
start = time.time()
while (np.abs(norm - old_norm) > self.tol * norm) and (idx < self.max_iter):
eta = np.dot(data_basis_expansion, self.coeffs)
mu = sigmoid(eta)
# calculate residuals
z = _residuals(y, mu, eta)
if penalty:
z[len(targets) + 1:] = np.zeros(((self.num_percentiles + 1) * self.n_features, 1)).squeeze()
self.spline = sm.OLS(z, data_basis_expansion).fit()
self.coeffs = self.spline.params
old_norm = norm
norm = np.sum((z - self.spline.predict(data_basis_expansion)) ** 2)
sys.stdout.write("\r>> Iteration: {0:04d}, elapsed time: {1:4.1f} m, norm: {2:4.1f}".format(idx + 1, (time.time() - start) / 60, norm))
sys.stdout.flush()
idx += 1
sys.stdout.write('\n')
sys.stdout.flush()
self._intercept = self.coeffs[0]
        # Note: to get the regression coeffs we need to account for the individual
        # feature of the attribute value itself in addition to the percentiles.
self._individual_feature_coeffs = self.coeffs[1:].reshape((self.n_features, self.num_percentiles + 1))
self._create_shape_functions(data)
self.is_fit = True
return self
def gcv_score(self):
X = self.spline.model.exog[:-(self.num_percentiles + 2), :]
n = X.shape[0]
y = self.spline.model.endog[:n]
y_hat = self.spline.predict(X)
hat_matrix_trace = self.spline.get_influence().hat_matrix_diag[:n].sum()
return n * np.power(y - y_hat, 2).sum() / np.power(n - hat_matrix_trace, 2)
def _get_basis_for_array(self, array):
return np.asarray([_get_basis_vector(array[:, self._get_index_for_feature(feat)],
self._knots[feat],
with_intercept=False).transpose() for feat in self.feature_names]).transpose()
def _create_knots_dict(self, data, num_percentiles):
self._knots = {feat: _get_percentiles(data[:, self._get_index_for_feature(feat)], num_percentiles=num_percentiles) for feat in self.feature_names}
def _flatten_basis_for_fitting(self, array):
# since we need to fix the intercept degree of freedom we add the intercept term manually and get the individual
# basis expansion without the intercept
basis_expansion = self._get_basis_for_array(array)
flattened_basis = np.ones((basis_expansion.shape[0], 1))
for idx in range(basis_expansion.shape[2]):
flattened_basis = np.append(flattened_basis, basis_expansion[:, :, idx], axis=1)
return flattened_basis
def _scale_transform(self, data):
transformed_data = data.copy()
for dim_idx in range(self.n_features):
scaler = _MinMaxScaler('dim_' + str(dim_idx))
transformed_data[:, dim_idx] = scaler.fit_transform(data[:, dim_idx])
self.scalers_.update({dim_idx: scaler})
return transformed_data
def _get_basis_vector(self, vals):
if isinstance(vals, float):
return np.asarray([1, vals] + R(vals, self._knots).tolist())
else:
return np.asarray([[1, val] + R(val, self._knots).tolist() for val in vals])
def _get_shape(self, feature_idx, vals):
scaler = self.scalers_.get(feature_idx)
scaled_vals = scaler.transform(vals)
basis_expansion = np.asarray([_get_basis_vector(scaled_vals, self._knots[self.feature_names[feature_idx]], with_intercept=True)]).squeeze()
feature_coeffs = np.asarray([self._intercept / self.n_features] + self._individual_feature_coeffs[feature_idx, :].tolist())
return np.dot(basis_expansion, feature_coeffs)
def _create_shape_functions(self, data):
shapes = dict()
for feat in self.feature_names:
dim_idx = self._get_index_for_feature(feat)
splits = np.unique(data[:, dim_idx].flatten()).tolist()
vals = self._get_shape(dim_idx, splits)
shapes[feat] = ShapeFunction(splits, vals, feat)
self.shapes = shapes
def _get_initial_coeffs(self, n_samples):
coeffs = np.zeros(((self.num_percentiles + 1) * self.n_features + 1, )).flatten()
coeffs[0] = 1 / (n_samples * ((self.num_percentiles + 1) * self.n_features + 1))
return coeffs
def _penalty_matrix(self):
S = np.zeros(((self.num_percentiles + 1) * self.n_features + 1, (self.num_percentiles + 1) * self.n_features + 1))
flat_knots = np.asarray([self._knots.get(feat) for feat in self.feature_names]).flatten()
penalty_matrix = np.real_if_close(sp.linalg.sqrtm(R.outer(flat_knots, flat_knots).astype(np.float64)), tol=10 ** 8)
penalty_matrix = np.insert(penalty_matrix,
np.arange(0, self.num_percentiles * self.n_features, self.num_percentiles),
0,
axis=1)
penalty_matrix = np.insert(penalty_matrix,
np.arange(0, self.num_percentiles * self.n_features, self.num_percentiles),
0,
axis=0)
S[1:, 1:] = penalty_matrix
return S
def predict(self, data):
scaled_data = self._scale_transform(data)
return sigmoid(self.spline.predict(self._flatten_basis_for_fitting(scaled_data)))
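# A minimal usage sketch (X and y are hypothetical; X is (n_samples, n_features),
# y holds binary labels):
# gam = PSplineGAM(num_percentiles=10)
# gam.train(X, y)
# probas = gam.predict(X)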
class _MinMaxScaler(object):
def __init__(self, name):
self.name = name
self._range_min = -1.0
self._range_max = 1.0
self.scale = None
self.min_val = None
self.max_val = None
def fit(self, values):
data = np.asarray(values, dtype=float)
self.min_val = data.min()
self.max_val = data.max()
self.scale = (self.max_val - self.min_val) / (self._range_max - self._range_min)
return self
def transform(self, values):
data = np.asarray(values, dtype=float)
return data / self.scale + (self._range_min - self.min_val / self.scale)
def fit_transform(self, values):
self.fit(values)
return self.transform(values)
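# For example (hypothetical data), the scaler maps the observed range onto [-1, 1]:
# _MinMaxScaler('demo').fit_transform([0.0, 5.0, 10.0])  # -> array([-1., 0., 1.])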
```
#### File: tests/test_feature_binner/test_recursive_feature_binner.py
```python
import sklearn.datasets as ds
import numpy as np
from DSTK.FeatureBinning import decision_tree_binner as tfb
cancer_ds = ds.load_breast_cancer()
data = cancer_ds['data']
target = cancer_ds['target']
def test_recursion():
binner = tfb.DecisionTreeBinner('test', max_leaf_nodes=4)
binner.fit(data[:, 0], target)
np.testing.assert_equal(binner.splits, [13.094999313354492, 15.045000076293945, 16.924999237060547, np.PINF, np.NaN])
np.testing.assert_equal(binner.values, [[0.04905660377358491, 0.9509433962264151],
[0.2878787878787879, 0.7121212121212122],
[0.8148148148148148, 0.18518518518518517],
[0.9915254237288136, 0.00847457627118644],
[0.37258347978910367, 0.62741652021089633]])
def test_recursion_with_mdlp():
binner = tfb.DecisionTreeBinner('test', mdlp=True)
binner.fit(data[:, 0], target)
np.testing.assert_equal(binner.splits, [13.094999313354492, 15.045000076293945, 17.880001068115234, np.PINF, np.NaN])
np.testing.assert_equal(binner.values, [[0.04905660377358491, 0.9509433962264151],
[0.2878787878787879, 0.7121212121212122],
[0.8533333333333334, 0.14666666666666667],
[1.0, 0.0],
[0.37258347978910367, 0.62741652021089633]])
def test_str_repr_with_mdlp():
assert_str = \
"""<= 13.0949993134: [ 0.0490566 0.9509434]
<= 15.0450000763: [ 0.28787879 0.71212121]
<= 17.8800010681: [ 0.85333333 0.14666667]
<= inf: [ 1. 0.]
NaN: [ 0.37258348 0.62741652]"""
binner = tfb.DecisionTreeBinner('test', mdlp=True)
binner.fit(data[:, 0], target)
assert str(binner) == assert_str
def test_fit():
feats = [0, 1, 2, np.nan, 5, np.nan]
labels = [0, 1, 0, 1, 1, 0]
binner = tfb.DecisionTreeBinner('test', max_leaf_nodes=3)
binner.fit(feats, labels)
np.testing.assert_equal(binner.splits, [0.5, 1.5, np.PINF, np.NaN])
np.testing.assert_equal(binner.values, [[1.0, 0.0], [0.0, 1.0], [0.5, 0.5], [0.5, 0.5]])
def test_transform():
feats = [0, 1, 2, np.nan, 5, np.nan]
labels = [0, 1, 0, 1, 1, 0]
binner = tfb.DecisionTreeBinner('test', max_leaf_nodes=3)
binner.fit(feats, labels)
np.testing.assert_equal(binner.transform(feats, class_index=1), [0.0, 1.0, 0.5, 0.5, 0.5, 0.5])
def test_fit_transform():
feats = [0, 1, 2, np.nan, 5, np.nan]
labels = [0, 1, 0, 1, 1, 0]
binner = tfb.DecisionTreeBinner('test', max_leaf_nodes=3)
trans = binner.fit_transform(feats, labels, class_index=1)
np.testing.assert_equal(trans, [0.0, 1.0, 0.5, 0.5, 0.5, 0.5])
```
#### File: DSTK/Timeseries/recurrence_plots.py
```python
import _recurrence_map
import numpy as np
def poincare_map(ts, ts2=None, threshold=0.1):
rec_dist = poincare_recurrence_dist(ts, ts2)
return (rec_dist < threshold).astype(int)
def poincare_recurrence_dist(ts, ts2=None):
if ts2 is None:
return _recurrence_map.recurrence_map(ts, ts)
else:
return _recurrence_map.recurrence_map(ts, ts2)
``` |
{
"source": "jotterbach/ml_playground",
"score": 3
} |
#### File: learning_to_learn/learning_to_optimize/learning_to_learn.py
```python
import numpy as np
import tensorflow as tf
from collections import deque, namedtuple
from typing import List
import random
Transition = namedtuple('Transition',
('actions', 'rewards', 'gradients', 'data', 'targets'))
logs_path = '/tmp/tensorflow_logs/example/'
class ReplayMemory(object):
"""
Simple convenience class to store relevant training traces and efficiently sample from them.
:param int capacity: Size of the replay memory
"""
def __init__(self, capacity: int):
self.capacity = capacity
self.memory = deque([], maxlen=self.capacity)
self.position = 0
def push(self, transition: Transition):
"""
Adds a new observation to the memory
:param transition: Transition object
:return: none
"""
self.memory.append(transition)
def sample(self, batchsize: int):
"""
        Samples 'batchsize' number of transitions from the memory
        :param batchsize: number of transitions to draw
:return: sample of Transition objects
"""
return random.sample(self.memory, batchsize)
def __len__(self):
return len(self.memory)
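# A minimal usage sketch (field values are hypothetical):
# memory = ReplayMemory(capacity=100)
# memory.push(Transition(actions=[[0, 0, 0]], rewards=[0.0], gradients=[[0, 0, 0]],
#                        data=None, targets=None))
# batch = memory.sample(1)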
class Agent(object):
"""
Agent that learns the update rule for a logistic regression.
:param sess: Tensorflow session.
"""
def __init__(self, sess: tf.Session):
self.memory = ReplayMemory(5000)
self.batch_size = 30
self.sess = sess
self.mode = tf.estimator.ModeKeys.TRAIN
self._init_graph()
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-4)
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
self.summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
def _init_graph(self):
self.data = tf.placeholder(name='data',
dtype=tf.float32,
shape=[100, None])
self.targets = tf.placeholder(name='target',
dtype=tf.float32,
shape=[100, None])
self.actions = tf.placeholder(name='action',
shape=[None, 3 * 25],
dtype=tf.float32)
self.reward = tf.placeholder(name='reward',
shape=[None, 25],
dtype=tf.float32)
self.grads = tf.placeholder(name='gradients',
shape=[None, 3 * 25],
dtype=tf.float32)
self.last_action = tf.placeholder(name='last_action',
shape=[None, 3],
dtype=tf.float32)
# We encode the bias by adding a third column to the data filled with 1's
self.weights = tf.Variable(initial_value=[[1e-5, 1e-4, 0]],
name="logit_weights",
dtype=tf.float32,
expected_shape=[1, 3],
trainable=True)
with tf.name_scope("policy"):
# Subgraph to define the policy network. We construct the input from
# atomic observation objects
self.input_layer = tf.concat([self.actions, self.reward, self.grads],
axis=1,
name="state_concatenation")
self.dense = tf.layers.dense(inputs=self.input_layer,
units=50,
activation=tf.nn.softmax,
name='dense_1')
self.dropout = tf.layers.dropout(inputs=self.dense,
rate=0.4,
training=(self.mode == tf.estimator.ModeKeys.TRAIN),
name='dropout')
self.policy = tf.layers.dense(inputs=self.dropout,
units=3,
name='output_layer')
with tf.name_scope('update_weights'):
# We update the weights variables using the policy output and the weights from the
# previous transaction
self.weights = self.last_action - self.policy
with tf.name_scope("meta_loss"):
# The meta-loss is constructed by varying the input data of a logit and then generally
# trying to find the right weights:
self.logits = tf.log(tf.nn.sigmoid(tf.matmul(self.data, self.weights, transpose_b=True)))
self.loss = -1. * tf.reduce_mean(tf.matmul(self.targets, self.logits, transpose_a=True))
def _train_minibatch(self):
"""
Samples from the ReplayMemory and trains the policy on the sampled observations
:return: None
"""
        batch: List[Transition] = self.memory.sample(self.batch_size)
for obs in batch:
action = obs.actions
reward = obs.rewards
grad = obs.gradients
data = obs.data
targets = obs.targets
self.sess.run(self.optimizer.minimize(self.loss),
feed_dict={
self.actions: np.array(action).flatten().reshape(-1, 75),
self.reward: np.array(reward).flatten().reshape(-1, 25),
self.grads: np.array(grad).flatten().reshape(-1, 75),
self.last_action: np.array(action[-1]).flatten().reshape(-1, 3),
self.data: data,
self.targets: np.array(targets).reshape(100, -1)
})
def _run_single_round(self, x0: list):
"""
Runs a single optimization round on a fixed dataset to create new memories to train on.
:param x0: Initial value for the weights.
:return: None
"""
# initialize round with new data
mean0 = [.1, .1]
cov0 = [[1, .01], [.01, 1]]
mean1 = [-.1, -.1]
cov1 = [[1, .02], [.02, 1]]
data, targets = create_data_for_metaloss(mean0, mean1, cov0, cov1)
# augment the data with a constant np.ones field to incorporate bias term
data = np.concatenate([data, np.ones(data.shape[0]).reshape(data.shape[0], 1)], axis=1)
# Initialize finite state space with a maximum FIFO queue
action = deque([], maxlen=25)
reward = deque([], maxlen=25)
grad = deque([], maxlen=25)
for _ in range(25):
action.append([0, 0, 0]) # 2 weights + 1 bias
reward.append(0)
grad.append([0, 0, 0]) # updates to the actions, a.k.a. logit weights
action.append(x0)
rew = 0
reward.append(rew)
grad.append(len(x0) * [0.0])
# Run a single event by doing 100 iterations of the update rule.
for idx in range(101):
rew, grad_update, weight = self.sess.run(
[self.loss, self.policy, self.weights],
feed_dict={
self.actions: np.array(action).flatten().reshape(-1, 75),
self.reward: np.array(reward).flatten().reshape(-1, 25),
self.grads: np.array(grad).flatten().reshape(-1, 75),
self.last_action: np.array(action[-1]).flatten().reshape(-1, 3),
self.data: data,
self.targets: np.array(targets).reshape(100, -1)
})
if idx % 20 == 0:
print(rew, weight)
# adjust tensorflow output and push it to the ReplayMemory as observation
action.append(weight.squeeze().flatten().tolist())
reward.append(rew.flatten().tolist()[0])
grad.append(grad_update.squeeze().flatten().tolist())
obs = Transition(actions=action,
gradients=grad,
rewards=reward,
data=data,
targets=targets)
self.memory.push(obs)
def learn(self):
for _ in range(500):
self._run_single_round(list(np.random.normal(0, 1, size=3)))
if len(self.memory) >= self.batch_size:
self._train_minibatch()
def create_data_for_metaloss(mean0, mean1, cov0, cov1):
data0 = np.random.multivariate_normal(mean0, cov0, size=50)
data1 = np.random.multivariate_normal(mean1, cov1, size=50)
data = np.vstack([data0, data1])
target0 = np.zeros(shape=50)
target1 = np.ones(shape=50)
targets = np.hstack([target0, target1])
return data, targets
if __name__ == "__main__":
sess = tf.Session()
agent = Agent(sess)
agent.learn()
```
#### File: manual_neural_network/nn/nn.py
```python
import numpy as np
from tqdm import tqdm
def softmax(a):
"""
Softmax function that tries to avoid overflow by normalizing the input data
:param a: array or list
:return: softmax of a
"""
a = np.asarray(a)
exp_a = np.exp(a - a.max())
return exp_a / exp_a.sum(axis=1, keepdims=True)
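# For example, softmax(np.array([[1.0, 2.0, 3.0]])) returns rows that sum to 1,
# and subtracting a.max() beforehand keeps np.exp from overflowing on large inputs.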
def _forward_prop(model, X):
"""
Calculate the probabilities of a feature through a forward pass
:param model: dictionary containing the model parameters
:param X: matrix of (n_samples x n_features)
:return: vector of probabilities
"""
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
z1 = X.dot(W1) + np.repeat(b1, X.shape[0], axis=0)
a1 = np.tanh(z1)
z2 = a1.dot(W2) + np.repeat(b2, a1.shape[0], axis=0)
return softmax(z2)
def calculate_loss(model: dict, X: np.ndarray, y: np.ndarray) -> float:
"""
Calculates the normalized negative log-loss
:param model: dictionary containing the NN model
:param X: data used for evaluating the model
:param y: true labels corresponding to the rows of X
:return: log-loss
"""
    probas = _forward_prop(model, X)
    # negative log-likelihood of the probability assigned to each true label
    log_probs = np.sum(-np.log(probas[np.arange(len(y)), y]))
    return float(log_probs / len(y))
def predict(model, X):
"""
Predicts the labels of X given the model
:param model: dictionary containing the NN model
:param X: data used for evaluating the model
:return: predicted labels of X
"""
probas = _forward_prop(model, X)
    return np.argmax(probas, axis=1)
def build_and_train(n_hidden: int, n_classes: int, X: np.ndarray, y: np.ndarray) -> dict:
"""
:param n_hidden: number of nodes in the hidden layer
:param n_classes: number of classes that should be predicted
:param X: data used for evaluating the model
:param y: true labels corresponding to the rows of X
:return: dictionary of the model, containing the weights and biases of each layer
"""
W1 = np.random.normal(loc=0, scale=1e-4, size=(X.shape[1], n_hidden)) / n_classes
b1 = np.zeros((1, n_hidden))
W2 = np.random.normal(loc=0, scale=1e-4, size=(n_hidden, n_classes)) / n_hidden
b2 = np.zeros((1, n_classes))
epsilon = 0.00001
reg_lambda = 0.001
model = {
'W1': W1,
'b1': b1,
'W2': W2,
'b2': b2
}
for epoch in tqdm(range(5000)):
# forward pass
z1 = X.dot(W1) + np.repeat(b1, X.shape[0], axis=0)
a1 = np.tanh(z1)
z2 = a1.dot(W2) + np.repeat(b2, a1.shape[0], axis=0)
probas = softmax(z2)
# backprop
delta3 = probas.copy()
        delta3[np.arange(X.shape[0]), y] -= 1  # subtract 1 from the true-class probabilities
dW2 = a1.transpose().dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = (1 - np.power(a1, 2)) * (delta3.dot(W2.transpose()))
dW1 = X.transpose().dot(delta2)
db1 = np.sum(delta2, axis=0, keepdims=True)
# regularize
dW2 += reg_lambda * W2
dW1 += reg_lambda * W1
W1 -= epsilon * dW1
b1 -= epsilon * db1
W2 -= epsilon * dW2
b2 -= epsilon * db2
model = {
'W1': W1,
'b1': b1,
'W2': W2,
'b2': b2
}
if epoch % 10 == 0:
print("Loss after epoch %i: %f" % (epoch, calculate_loss(model, X, y)))
return model
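# A minimal usage sketch (toy data; sizes are hypothetical):
# X = np.random.randn(20, 2)
# y = np.random.randint(0, 2, size=20)
# model = build_and_train(n_hidden=4, n_classes=2, X=X, y=y)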
``` |
{
"source": "jottew/obsidian.py",
"score": 3
} |
#### File: obsidian.py/obsidian/errors.py
```python
from __future__ import annotations
from aiohttp import ClientResponse
__all__: tuple = (
'ObsidianException',
'ObsidianSearchFailure',
'NoSearchMatchesFound',
'HTTPError',
'NodeNotConnected',
'NodeAlreadyExists',
'NodeCreationError',
'ObsidianConnectionFailure',
'ObsidianAuthorizationFailure',
'ObsidianSpotifyException',
'SpotifyHTTPError',
'SpotifyAuthorizationFailure'
)
class ObsidianException(Exception):
"""
Raised when an error related to this module occurs.
"""
class ObsidianSearchFailure(ObsidianException):
"""
Raised when searching for a track fails.
"""
class NoSearchMatchesFound(ObsidianSearchFailure):
"""
Raised when no matches are found via search query.
"""
def __init__(self, query: str) -> None:
super().__init__(f'No song matches for query {query!r}.')
class HTTPError(ObsidianException):
"""
Raised when an error via HTTP request occurs.
"""
def __init__(self, message: str, response: ClientResponse) -> None:
super().__init__(message)
self.response: ClientResponse = response
class ObsidianSpotifyException(ObsidianException):
"""
Raised when an error related to spotify occurs.
"""
class SpotifyHTTPError(ObsidianSpotifyException):
"""
Raised when an error via HTTP request [Spotify] occurs.
"""
def __init__(self, message: str, response: ClientResponse) -> None:
super().__init__(message)
self.response: ClientResponse = response
class SpotifyAuthorizationFailure(SpotifyHTTPError):
"""
Raised when authorizing to spotify fails.
"""
class NodeNotConnected(ObsidianException):
"""
Raised when a socket request is sent without the node being connected.
"""
class ObsidianConnectionFailure(ObsidianException):
"""
Raised when connecting fails.
Attributes
----------
node: :class:`.BaseNode`
The node that failed to connect.
original: Exception
The exception that was raised.
"""
def __init__(self, node, error: BaseException) -> None:
from .node import BaseNode
self.node: BaseNode = node
self.original = error
message = f'Node {node.identifier!r} failed to connect ({error.__class__.__name__}): {error}'
super().__init__(message)
@classmethod
def from_message(cls, node, message: str) -> ObsidianConnectionFailure:
instance = cls.__new__(cls)
instance.node = node
instance.original = None
super(ObsidianException, instance).__init__(message)
return instance
class ObsidianAuthorizationFailure(ObsidianConnectionFailure):
"""
Raised when connecting fails due to invalid authorization.
"""
def __new__(cls, node, *args, **kwargs) -> ObsidianAuthorizationFailure:
message = f'Node {node.identifier!r} failed authorization.'
return ObsidianConnectionFailure.from_message(node, message)
class NodeCreationError(ObsidianException):
"""
Raised when a node could not be successfully created.
"""
class NodeAlreadyExists(NodeCreationError):
"""
Raised when a node with the same identifier already exists.
"""
def __init__(self, identifier: str) -> None:
message = f'Node identifier {identifier!r} already exists.'
super().__init__(message)
```
#### File: obsidian.py/obsidian/http.py
```python
import logging
import asyncio
from aiohttp import ClientSession
from discord.backoff import ExponentialBackoff
from typing import Any, Coroutine, Dict, List, Optional
from .errors import HTTPError
__all__: tuple = (
'HTTPClient',
)
__log__: logging.Logger = logging.getLogger('obsidian.node')
class HTTPClient:
def __init__(self, session: ClientSession, host: str, port: str, password: str) -> None:
self.__session: ClientSession = session
self.__host: str = host
self.__port: str = port
self.__password: str = password
@property
def url(self) -> str:
return f'http://{self.__host}:{self.__port}/'
async def request(
self,
method: str,
route: str,
parameters: Optional[Dict[str, Any]] = None,
payload: Optional[Dict[str, Any]] = None,
*,
max_retries: int = 3,
backoff: Optional[ExponentialBackoff] = None,
headers: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
if max_retries < 1:
raise ValueError('Max retries must be at least 1.')
backoff = backoff or ExponentialBackoff()
url = self.url + route.lstrip('/')
_headers = {'Authorization': self.__password}
if headers:
_headers.update(headers)
kwargs = {
'method': method,
'url': url,
            'headers': _headers
}
if parameters is not None:
kwargs['params'] = parameters
if payload is not None:
kwargs['json'] = payload
for _ in range(max_retries):
async with self.__session.request(**kwargs) as response:
if 200 <= response.status < 300:
return await response.json()
delay = backoff.delay()
__log__.warning(f'HTTP | {response.status} status code while requesting from {response.url!r}, retrying in {delay:.2f} seconds')
await asyncio.sleep(delay)
message = f'HTTP | {response.status} status code while requesting from {response.url!r}, retry limit exhausted'
__log__.error(message)
raise HTTPError(message, response)
def load_tracks(self, identifier: str) -> Coroutine[Any, Any, Dict[str, Any]]:
return self.request('GET', '/loadtracks', parameters={
'identifier': identifier
})
def decode_track(self, track: str) -> Coroutine[Any, Any, Dict[str, Any]]:
return self.request('GET', '/decodetrack', parameters={
'track': track
})
    def decode_tracks(self, tracks: List[str]) -> Coroutine[Any, Any, Dict[str, Any]]:
        # A list cannot be encoded as query parameters; send it as a JSON payload instead
        return self.request('POST', '/decodetracks', payload={
            'tracks': tracks
        })
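# A minimal usage sketch (host, port, and password are hypothetical):
# http = HTTPClient(session, '127.0.0.1', '3030', 'password')
# results = await http.load_tracks('ytsearch:example query')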
```
#### File: obsidian.py/obsidian/node.py
```python
import json
import aiohttp
import asyncio
import discord
import logging
from discord.ext import commands
from typing import Any, Dict, List, Optional, Union, overload
from .stats import Stats
from .player import Player
from .http import HTTPClient
from .websocket import Websocket
from .search import TrackSearcher
from .errors import NodeNotConnected
from .mixin import NodeListenerMixin
from .enums import OpCode, Source
from .track import Track, Playlist
from .spotify import SpotifyClient
Bot = Union[discord.Client, discord.AutoShardedClient, commands.Bot, commands.AutoShardedBot]
__all__: tuple = (
'Node',
)
__log__: logging.Logger = logging.getLogger('obsidian.node')
class BaseNode(object):
"""Represents the base class for all nodes.
You should use :class:`Node` instead.
Parameters
----------
bot: :class:`discord.Client`
The client or bot that this node belongs to.
host: str
The IP of the host that Obsidian is running on.
port: str
The port of the host that Obsidian is running on.
password: Optional[str]
        The password needed to connect to Obsidian.
identifier: Optional[str]
The name to use to refer to this node. Defaults to `'MAIN'`
region: Optional[:class:`discord.VoiceRegion`]
The voice region this node will be in.
See Also
--------
:class:`Node`
"""
@overload
def __init__(
self,
bot: Bot,
host: str = '127.0.0.1',
port: Union[str, int] = '3030',
password: Optional[str] = None,
identifier: Optional[str] = None,
region: Optional[discord.VoiceRegion] = None,
*,
session: Optional[aiohttp.ClientSession] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
heartbeat: Optional[float] = None,
secure: Optional[bool] = None,
**kwargs
) -> None:
...
@overload
def __init__(
self,
bot: Bot,
host: str = '127.0.0.1',
port: Union[str, int] = '3030',
password: Optional[str] = None,
identifier: Optional[str] = None,
region: Optional[discord.VoiceRegion] = None,
*,
session: Optional[aiohttp.ClientSession] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
heartbeat: Optional[float] = None,
secure: Optional[bool] = None,
spotify: Optional[SpotifyClient] = None,
**kwargs
) -> None:
...
@overload
def __init__(
self,
bot: Bot,
host: str = '127.0.0.1',
port: Union[str, int] = '3030',
password: Optional[str] = None,
identifier: Optional[str] = None,
region: Optional[discord.VoiceRegion] = None,
*,
session: Optional[aiohttp.ClientSession] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
heartbeat: Optional[float] = None,
secure: Optional[bool] = None,
spotify_client_id: Optional[str] = None,
spotify_client_secret: Optional[str] = None,
**kwargs
) -> None:
...
def __init__(
self,
bot: Bot,
host: str = '127.0.0.1',
port: Union[str, int] = '3030',
password: Optional[str] = None,
identifier: Optional[str] = None,
region: Optional[discord.VoiceRegion] = None,
**kwargs
) -> None:
self._bot: Bot = bot
self._host: str = host
self._port: str = str(port)
self._password: str = password or ''
self._identifier: str = identifier or 'MAIN'
self._region: Optional[discord.VoiceRegion] = region
self._players: Dict[int, Player] = {}
self._search: TrackSearcher = TrackSearcher(self)
self.__stats: Optional[Stats] = None
self.__session: aiohttp.ClientSession = kwargs.get('session') or aiohttp.ClientSession()
self.__loop: asyncio.AbstractEventLoop = kwargs.get('loop') or bot.loop
self.__task: Optional[asyncio.Task] = None
self.__ws: Optional[Websocket] = None
self.__http: HTTPClient = HTTPClient(
self.__session, self._host, self._port, self._password
)
spotify = kwargs.get('spotify')
spotify_client_id = kwargs.get('spotify_client_id')
spotify_client_secret = kwargs.get('spotify_client_secret')
self._spotify: Optional[SpotifyClient] = None
if spotify:
self._spotify = spotify
elif spotify_client_id and spotify_client_secret:
self._spotify = SpotifyClient(
spotify_client_id,
spotify_client_secret,
loop=self.__loop,
session=self.__session
)
self.__internal__: Dict[str, Any] = kwargs
self.__listeners__: Dict[str, List[callable]] = {}
def __repr__(self) -> str:
return f'<Node identifier={self._identifier!r}>'
@property
def bot(self) -> Bot:
""":class:`discord.Client`: The :class:`discord.Client` that this node corresponds to."""
return self._bot
@property
def host(self) -> str:
"""str: The IP of the host of Obsidian."""
return self._host
@property
def port(self) -> str:
"""str: The port of the host of Obsidian."""
return self._port
@property
def password(self) -> str:
"""str: The password needed to connect to Obsidian."""
return self._password
@property
def identifier(self) -> str:
"""str: The identifier for this node, specified in the constructor."""
return self._identifier
@property
def region(self) -> Optional[discord.VoiceRegion]:
""":class:`discord.VoiceRegion`: The voice region for this node, specified in the constructor."""
return self._region
@property
def session(self) -> aiohttp.ClientSession:
return self.__session
@property
def players(self) -> Dict[int, Player]:
return self._players
@property
def spotify(self) -> Optional[SpotifyClient]:
return self._spotify
@property
def loop(self) -> asyncio.AbstractEventLoop:
return self.__loop
@property
def ws(self) -> Websocket:
return self.__ws
@property
def http(self) -> HTTPClient:
return self.__http
@property
def stats(self) -> Stats:
""":class:`.Stats` The statistics for this node returned by the websocket."""
return self.__stats
@property
def connected(self) -> bool:
"""bool: Whether or not this node is connected."""
if not self.__ws:
return False
return self.__ws.connected
def dispatch(self, event: str, *args, **kwargs) -> None:
raise NotImplementedError
def dispatch_event(self, player, raw_event: str, event, *args, **kwargs) -> None:
self.loop.create_task(
discord.utils.maybe_coroutine(self.dispatch, player, raw_event, event, *args, **kwargs)
)
for listener in self.__listeners__.get(event, []):
self.loop.create_task(
discord.utils.maybe_coroutine(listener, *args, **kwargs)
)
async def handle_ws_response(self, op: OpCode, data: dict) -> None:
raise NotImplementedError
async def connect(
self,
*,
session: Optional[aiohttp.ClientSession] = None,
loop: Optional[asyncio.AbstractEventLoop] = None
) -> None:
"""Establishes a websocket connection for this node.
Parameters
----------
session: Optional[:class:`aiohttp.ClientSession`]
The session to use when connecting.
loop: Optional[:class:`asyncio.AbstractEventLoop`]
The event loop to use when connecting.
"""
await self.bot.wait_until_ready()
connect_kwargs = {
'heartbeat': self.__internal__.get('heartbeat'),
'secure': self.__internal__.get('secure', False)
}
self.__ws = Websocket(self, session or self.__session, loop or self.__loop, **connect_kwargs)
await self.ws.connect()
async def disconnect(self, *, force: bool = False) -> None:
"""Disconnects the current websocket connection for this node.
Parameters
----------
force: bool, default: False
Whether or not to force disconnection.
"""
if self.connected:
await self.ws.disconnect()
for player in self._players.values():
await player.disconnect(force=force)
self.__ws = None
if self.__task and not self.__task.done():
self.__task.cancel()
self.__task = None
__log__.info(f'NODE {self.identifier!r} | Node disconnected.')
async def destroy(self, *, force: bool = False) -> None:
"""Disconnects, deletes, and destroys this node.
Parameters
----------
force: bool, default: False
Whether or not to force disconnection.
"""
from .pool import NodePool
await self.disconnect(force=force)
del NodePool._nodes[self.identifier]
__log__.info(f'NODE {self.identifier!r} | Node has been destroyed.')
async def send(self, op: Union[OpCode, int], data: dict) -> None:
"""Sends a message to the Obsidian websocket.
Parameters
----------
op: Union[:class:`.OpCode`, int]
The Op-Code of the request.
data: Dict[str, Any]
The JSON payload to send.
"""
if not self.connected:
raise NodeNotConnected(f'Node {self.identifier!r} is not connected.')
if not isinstance(op, int):
op = op.value
payload = {'op': op, 'd': data}
data = json.dumps(payload)
if isinstance(data, bytes):
data = data.decode('utf-8')
await self.ws.send_str(data)
__log__.debug(f'NODE {self.identifier!r} | Sent a {op} websocket payload: {payload}')
def get_player(
self,
guild: Union[discord.Guild, discord.Object, int],
cls: type = Player,
must_exist: bool = False,
*args,
**kwargs
) -> Optional[Player]:
"""Gets an existing :class:`.Player`, or if not found, creates it.
Parameters
----------
guild: Union[:class:`discord.Guild`, :class:`discord.Object`, int]
The guild that this player corresponds to.
cls: type, default: :class:`.Player`
The class to cast the player to.
must_exist: bool, default: False
Whether or not to return `None` if the player doesn't already exist.
args
Extra arguments to pass into the class constructor.
kwargs
Extra keyword arguments to pass into the class constructor.
Returns
-------
:class:`.Player`
The player found, or if it didn't exist, created.
"""
if isinstance(guild, int):
guild = discord.Object(guild)
player = self._players.get(guild.id)
if not player:
if must_exist:
return
player = cls(self, self._bot, guild, *args, **kwargs)
self._players[guild.id] = player
return player
if type(player) is not cls:
player = cls(player.node, player.bot, player.guild, *args, **kwargs)
return player
def destroy_player(self, guild: Union[discord.Guild, discord.Object, int]) -> None:
"""Destroys and deletes a player.
Parameters
----------
guild: Union[:class:`discord.Guild`, :class:`discord.Object`, int]
The player's corresponding guild.
"""
if isinstance(guild, int):
guild = discord.Object(guild)
player = self._players.get(guild.id)
if not player:
return
self.loop.create_task(player.destroy())
async def decode_track(self, id: str, /, *, cls: type = Track, **kwargs) -> Optional[Track]:
"""|coro|
Decodes a track given it's Base 64 ID.
Parameters
----------
id: str
The track's ID, usually represented in Base 64.
cls: type, default: :class:`Track`
The class to cast the track to.
kwargs
Extra keyword arguments to pass into the class constructor.
Returns
-------
Optional[:class:`Track`]
The decoded track, if any.
See Also
--------
:meth:`Node.decode_tracks`
"""
return cls(id=id, info=await self.http.decode_track(id), **kwargs)
    async def decode_tracks(self, ids: List[str], /, *, cls: type = Track, **kwargs) -> Optional[List[Track]]:
"""|coro|
Decodes multiple tracks, given their Base 64 ID's.
Parameters
----------
ids: List[str]
A list of base 64 track ID's to decode.
cls: type, default: :class:`Track`
The class to cast the tracks to.
kwargs
Extra keyword arguments to pass into the class constructor.
Returns
-------
Optional[List[:class:`Track`]]
A list of constructed tracks, if any.
See Also
--------
:meth:`Node.decode_track`
"""
tracks = await self.http.decode_tracks(ids)
return [cls(id=id_, info=track, **kwargs) for id_, track in zip(ids, tracks['tracks'])]
@overload
async def search_track(
self,
query: str,
*,
source: Optional[Source] = None,
cls: type = Track,
suppress: bool = False,
**kwargs
) -> Optional[Union[Track, Playlist]]:
...
async def search_track(self, *args, **kwargs):
"""Searches for one single track given a query or URL.
.. warning::
            If the query is a direct URL, the `source` kwarg will be ignored.
Parameters
----------
query: str
The search query or URL.
source: Optional[:class:`.Source`]
The source that the track should come from.
cls: type, default: :class:`.Track`
The class to cast the track to.
suppress: bool, default: False
            Whether or not to suppress :exc:`.NoSearchMatchesFound`.
Returns
-------
Optional[Union[:class:`.Track`, :class:`.Playlist`]]
The track or playlist that was found, if any.
"""
return await self._search.search_track(*args, **kwargs)
@overload
async def search_tracks(
self,
query: str,
*,
source: Optional[Source] = None,
cls: type = Track,
suppress: bool = True,
limit: Optional[int] = None,
**kwargs
) -> Optional[Union[List[Track], Playlist]]:
...
async def search_tracks(self, *args, **kwargs):
"""Searches for multiple tracks given a query or URL.
.. warning::
            If the query is a direct URL, the `source` kwarg will be ignored.
.. warning::
            If a playlist is found, the return type will not be a list, but rather the playlist itself.
Parameters
----------
query: str
The search query or URL.
source: Optional[:class:`.Source`]
The source that the tracks should come from.
cls: type, default: :class:`.Track`
The class to cast the tracks to.
        suppress: bool, default: True
            Whether or not to suppress :exc:`.NoSearchMatchesFound`.
limit: Optional[int]
The maximum amount of tracks to return.
Returns
-------
        Optional[Union[List[:class:`.Track`], :class:`.Playlist`]]
A list of tracks found, or a playlist.
"""
return await self._search.search_tracks(*args, **kwargs)
class Node(BaseNode, NodeListenerMixin):
"""Represents a connection to Obsidian that manages requests and websockets.
Parameters
----------
bot: :class:`discord.Client`
The client or bot that this node belongs to.
host: str
The IP of the host that Obsidian is running on.
port: str
The port of the host that Obsidian is running on.
password: Optional[str]
The password needed to connect to Obsidian.
identifier: Optional[str]
The name to use to refer to this node. Defaults to `'MAIN'`
region: Optional[:class:`discord.VoiceRegion`]
The voice region this node will be in.
"""
@property
def node(self):
        return self  # Relevant for NodeListenerMixin
    def dispatch(self, player, raw_event: str, event, *args, **kwargs) -> None:
        # Temporary solution introduced in a PR: give the player a chance to
        # handle the event itself, and fall through silently if it has no handler.
        try:
            x = getattr(player, event)(player, event, *args, **kwargs)
            if x:
                return
        except AttributeError:
            pass
        # self.bot.dispatch(event, *args, **kwargs)
async def handle_ws_response(self, op: OpCode, data: dict) -> None:
if op is OpCode.STATS:
self.__stats = Stats(data)
return
player = self.get_player(int(data['guild_id']), must_exist=True)
if not player:
return
if op is OpCode.PLAYER_EVENT:
player.dispatch_event(data)
elif op is OpCode.PLAYER_UPDATE:
player.update_state(data)
```
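The search and player APIs above compose naturally. A minimal usage sketch, assuming a node with the default identifier `'MAIN'` was already initiated through the pool (the guild ID is a placeholder):
```python
from obsidian.pool import NodePool
async def demo(guild_id: int):
    # Look up the default node registered in the pool.
    node = NodePool.get_node('MAIN')
    # Get (or lazily create) the player bound to this guild.
    player = node.get_player(guild_id)
    # suppress=True returns None instead of raising NoSearchMatchesFound.
    track = await node.search_track('never gonna give you up', suppress=True)
    print(player, track)
```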
#### File: obsidian.py/obsidian/pool.py
```python
import discord
import aiohttp
import asyncio
from discord.ext import commands
from typing import Dict, Optional, Union, overload
from .node import BaseNode, Node
from .errors import NodeAlreadyExists
from .spotify import SpotifyClient
__all__: tuple = (
'NodePool',
)
Bot = Union[discord.Client, discord.AutoShardedClient, commands.Bot, commands.AutoShardedBot]
class _NodePool:
def __init__(self):
self._nodes: Dict[str, BaseNode] = {}
def __repr__(self) -> str:
return f'NodePool [ {", ".join(map(repr, self._nodes))} ]'
@property
def nodes(self) -> Dict[str, BaseNode]:
return self._nodes
@overload
async def initiate_node(
self,
bot: Bot,
host: str = '127.0.0.1',
port: Union[str, int] = '3030',
password: Optional[str] = None,
identifier: Optional[str] = None,
region: Optional[discord.VoiceRegion] = None,
*,
cls: type = Node,
session: Optional[aiohttp.ClientSession] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
heartbeat: Optional[float] = None,
secure: Optional[bool] = None,
**kwargs
) -> BaseNode:
...
@overload
async def initiate_node(
self,
bot: Bot,
host: str = '127.0.0.1',
port: Union[str, int] = '3030',
password: Optional[str] = None,
identifier: Optional[str] = None,
region: Optional[discord.VoiceRegion] = None,
*,
cls: type = Node,
session: Optional[aiohttp.ClientSession] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
heartbeat: Optional[float] = None,
secure: Optional[bool] = None,
spotify: Optional[SpotifyClient] = None,
**kwargs
) -> BaseNode:
...
@overload
async def initiate_node(
self,
bot: Bot,
host: str = '127.0.0.1',
port: Union[str, int] = '3030',
password: Optional[str] = None,
identifier: Optional[str] = None,
region: Optional[discord.VoiceRegion] = None,
*,
cls: type = Node,
session: Optional[aiohttp.ClientSession] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
heartbeat: Optional[float] = None,
secure: Optional[bool] = None,
spotify_client_id: Optional[str] = None,
spotify_client_secret: Optional[str] = None,
**kwargs
) -> BaseNode:
...
async def initiate_node(
self,
bot: Bot,
host: str = '127.0.0.1',
port: Union[str, int] = '3030',
password: Optional[str] = None,
identifier: Optional[str] = None,
region: Optional[discord.VoiceRegion] = None,
*,
cls: type = Node,
**kwargs
) -> BaseNode:
if not issubclass(cls, BaseNode):
raise TypeError('Node classes must inherit from BaseNode.')
if identifier in self._nodes:
raise NodeAlreadyExists(identifier)
node = cls(bot, host, port, password, identifier, region, **kwargs)
await node.connect()
self._nodes[node.identifier] = node
return node
    def get_node(self, identifier: Optional[str] = None) -> Optional[BaseNode]:
        return self._nodes.get(identifier)
NodePool = _NodePool()
```
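A bootstrap sketch for the pool above; host, port, and password are placeholders, an Obsidian server must actually be listening for the connection to succeed, and newer discord.py versions may also require an `intents` argument to `Client`:
```python
import discord
from obsidian.pool import NodePool
bot = discord.Client()
async def setup_nodes():
    node = await NodePool.initiate_node(
        bot,
        host='127.0.0.1',
        port='3030',
        password='hunter2',  # placeholder credential
        identifier='MAIN',
    )
    print(f'Connected: {node!r}')
```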
#### File: obsidian.py/obsidian/track.py
```python
import discord
from discord.ext import commands
from typing import Any, Dict, Iterator, List, Optional
from .enums import Source
__all__: tuple = (
'Track',
'Playlist'
)
class Track:
"""
    Represents an Obsidian song track.
Parameters
----------
id: str
The base 64 track ID.
info: Dict[str, Any]
The raw JSON payload returned by Obsidian,
containing information about this track.
ctx: Optional[:class:`commands.Context`]
An optional context to use for this track.
By default, if this is provided, :attr:`Track.requester` will be the author of the context.
kwargs
Extra keyword arguments to pass into the constructor.
"""
__slots__: tuple = (
'_id',
'_ctx',
'_requester',
'_title',
'_author',
'_uri',
'_identifier',
'_length',
'_position',
'_is_stream',
'_is_seekable',
'_source',
'_thumbnail'
)
def __init__(self, *, id: str, info: Dict[str, Any], ctx: Optional[commands.Context] = None, **kwargs) -> None:
self._ctx: Optional[commands.Context] = ctx
self._requester: Optional[discord.Member] = kwargs.get('requester') or (ctx.author if ctx else None)
self._id: str = id
self._uri: str = info['uri']
self._title: str = info['title']
self._author: str = info['author']
self._length: int = info['length']
self._position: int = info['position']
self._is_stream: bool = info['is_stream']
self._identifier: str = info['identifier']
self._is_seekable: bool = info['is_seekable']
self._source: Source = Source(info['source_name'])
self._thumbnail: Optional[str] = info.get('thumbnail')
def __repr__(self) -> str:
return f'<Track title={self._title!r} uri={self._uri!r} source={self.source!r} length={self._length}>'
@property
def id(self) -> str:
"""
        str: The Base64 track ID, which can be used to rebuild track objects.
See Also
--------
:meth:`.Node.decode_track`
"""
return self._id
@property
def ctx(self) -> Optional[commands.Context]:
"""
Optional[:class:`~discord.ext.commands.Context`]: The :class:`~discord.ext.commands.Context`
        that invoked the track. Could be `None`.
"""
return self._ctx
@property
def title(self) -> str:
"""
str: The track title.
"""
return self._title
@property
def author(self) -> str:
"""
str: The author of the track.
"""
return self._author
@property
def uri(self) -> str:
"""
str: The track's URI.
"""
return self._uri
@property
def identifier(self) -> str:
"""
        str: The track's identifier.
"""
return self._identifier
@property
def length(self) -> int:
"""
int: The duration of the track in milliseconds.
"""
return self._length
@property
def position(self) -> int:
"""
int: The current position of the track in milliseconds.
"""
return self._position
@property
def stream(self) -> bool:
"""
bool: Whether the track is a stream or not.
"""
return self._is_stream
@property
def seekable(self) -> bool:
"""
        bool: Whether or not the track's position can be seeked.
"""
return self._is_seekable
@property
def source(self) -> Source:
"""
        :class:`Source`: An |enum_link| indicating the :class:`.Source` of this track.
"""
return self._source
@property
def thumbnail(self) -> str:
"""
        str: The image URL of the track's thumbnail; may be an empty :class:`str` depending on the :class:`.Source`.
"""
if self.source is Source.YOUTUBE:
return f'https://img.youtube.com/vi/{self.identifier}/hqdefault.jpg'
if self._thumbnail:
return self._thumbnail
return ''
@property
def requester(self) -> Optional[discord.Member]:
"""
Optional[:class:`discord.Member`]: The :class:`discord.Member` that requested the track.
"""
return self._requester
@ctx.setter
def ctx(self, ctx: commands.Context) -> None:
"""
A utility `function` for changing the :attr:`ctx`.
"""
self._ctx = ctx
@requester.setter
def requester(self, requester: discord.Member) -> None:
"""
A utility `function` for changing the :attr:`requester`.
"""
self._requester = requester
class Playlist:
"""
Represents a playlist of tracks.
"""
def __init__(
self,
*,
info: Dict[str, Any],
tracks: List[Dict[str, Any]],
ctx: Optional[commands.Context] = None,
cls: type = Track,
**kwargs
) -> None:
self._ctx: Optional[commands.Context] = ctx
self._requester: Optional[discord.Member] = kwargs.get('requester') or (ctx.author if ctx else None)
self._name: str = info['name']
self._tracks: List[Dict[str, Any]] = tracks
self._selected_track: int = info.get('selected_track', 0)
self._uri: Optional[str] = info.get('uri')
self.__track_cls: type = cls
self.__constructed_tracks: Optional[List[Track]] = None
self.__kwargs = kwargs
@property
def __track_kwargs(self) -> Dict[str, Any]:
return {
**self.__kwargs,
'requester': self._requester
}
@property
def count(self) -> int:
"""
int: Return the total amount of tracks in the playlist.
"""
return len(self._tracks)
@property
def tracks(self) -> List[Track]:
"""
        List[:class:`Track`]: The constructed list of :class:`Track` objects.
"""
if self.__constructed_tracks is not None:
return self.__constructed_tracks
self.__constructed_tracks = res = [
self.__track_cls(id=track['track'], info=track['info'], ctx=self.ctx, **self.__track_kwargs)
for track in self._tracks
]
return res
@property
def ctx(self) -> Optional[commands.Context]:
"""
Optional[:class:`~discord.ext.commands.Context`]: The :class:`~discord.ext.commands.Context`
        that invoked the playlist. Could be `None`.
"""
return self._ctx
@property
def name(self) -> str:
"""
str: The name of the playlist.
"""
return self._name
@property
def selected_track(self) -> Optional[Track]:
"""
        Optional[:class:`Track`]: The selected track returned by Obsidian. Could be `None`.
"""
        try:
            return self.tracks[self._selected_track]
        except IndexError:
            return self.tracks[0] if self.tracks else None
@property
def uri(self) -> Optional[str]:
"""
        Optional[str]: The playlist's URI.
"""
return self._uri
@property
def source(self) -> Source:
"""
        :class:`.Source`: An |enum_link| indicating the :class:`.Source` of this playlist.
"""
try:
return self.tracks[0].source
except (IndexError, KeyError):
return Source.YOUTUBE
@property
def requester(self) -> Optional[discord.Member]:
"""
Optional[:class:`discord.Member`]: The :class:`discord.Member` that requested the playlist.
"""
return self._requester
@requester.setter
def requester(self, requester: discord.Member) -> None:
"""
A utility `function` for changing the :attr:`requester`.
"""
self._requester = requester
@ctx.setter
def ctx(self, ctx: commands.Context) -> None:
"""
A utility `function` for changing the :attr:`ctx`.
"""
self._ctx = ctx
    def __iter__(self) -> Iterator[Dict[str, Any]]:
return iter(self._tracks)
def __len__(self) -> int:
return self.count
def __repr__(self) -> str:
return f'<Playlist name={self._name!r} selected_track={self.selected_track} count={len(self._tracks)}>'
```
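To illustrate the fields `Track.__init__` consumes, here is a hand-built payload; every value is invented, and `'youtube'` is assumed to be a valid `Source` value:
```python
from obsidian.track import Track
raw_info = {
    'uri': 'https://www.youtube.com/watch?v=dQw4w9WgXcQ',
    'title': 'Example Title',
    'author': 'Example Author',
    'length': 212000,  # milliseconds
    'position': 0,
    'is_stream': False,
    'identifier': 'dQw4w9WgXcQ',
    'is_seekable': True,
    'source_name': 'youtube',
}
track = Track(id='QAAA...', info=raw_info)
# YouTube thumbnails are derived from the identifier:
print(track.thumbnail)  # https://img.youtube.com/vi/dQw4w9WgXcQ/hqdefault.jpg
```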
#### File: obsidian.py/obsidian/websocket.py
```python
import asyncio
import aiohttp
import logging
from discord.backoff import ExponentialBackoff
from typing import Coroutine, Dict, Optional
from .enums import OpCode
from .errors import ObsidianConnectionFailure, ObsidianAuthorizationFailure
__all__: tuple = (
'Websocket',
)
__log__: logging.Logger = logging.getLogger('obsidian.node')
class Websocket:
def __init__(
self,
node,
session: aiohttp.ClientSession,
loop: asyncio.AbstractEventLoop,
*,
secure: bool = False,
**connect_kwargs
) -> None:
from .node import BaseNode
        self._password: str = node.password
self._bot_user_id: str = str(node.bot.user.id)
self._session: aiohttp.ClientSession = session
self._loop: asyncio.AbstractEventLoop = loop
self.__node: BaseNode = node
self.__secure: bool = secure
self.__ws: Optional[aiohttp.ClientWebSocketResponse] = None
self.__internal__ = connect_kwargs
def __repr__(self) -> str:
return f'<Websocket node={self.__node.identifier!r}>'
@property
def url(self) -> str:
ws = 'wss' if self.__secure else 'ws'
return f'{ws}://{self.__node.host}:{self.__node.port}/magma'
@property
    def headers(self) -> Dict[str, str]:
return {
'Authorization': self._password,
'User-Id': self._bot_user_id,
'Client-Name': 'Obsidian'
}
@property
def connected(self) -> bool:
return self.__ws is not None and not self.__ws.closed
@property
def _ws(self) -> aiohttp.ClientWebSocketResponse:
return self.__ws
async def connect(self) -> aiohttp.ClientWebSocketResponse:
identifier = self.__node.identifier
try:
ws = await self._session.ws_connect(self.url, headers=self.headers, **self.__internal__)
except aiohttp.WSServerHandshakeError as exc:
if exc.status == 4001:
__log__.error(f'NODE {identifier!r} | Failed to authorize')
raise ObsidianAuthorizationFailure(self.__node)
raise Exception(exc)
except Exception as exc:
__log__.fatal(f'NODE {identifier!r} | Failed to connect')
raise ObsidianConnectionFailure(self.__node, exc)
        else:
            self.__ws = ws
            # Store the listener task under BaseNode's mangled private name so
            # BaseNode.disconnect can find and cancel it later.
            self.__node._BaseNode__task = self._loop.create_task(self.listen())
            self.__node.dispatch_event('obsidian_node_ready', self.__node)
            __log__.info(f'NODE {identifier!r} | Connection successful')
            return ws
async def disconnect(self) -> None:
await self.__ws.close()
async def listen(self) -> None:
backoff = ExponentialBackoff(base=7)
while True:
payload = await self.__ws.receive()
if payload.type is aiohttp.WSMsgType.CLOSED:
retry = backoff.delay()
__log__.warning(f'NODE {self.__node.identifier!r} | Websocket is closed, attempting reconnection in {retry:.2f} seconds.')
await asyncio.sleep(retry)
if not self.connected:
self._loop.create_task(self.connect())
else:
data = payload.json()
try:
op = OpCode(data['op'])
except ValueError:
__log__.warning(f'NODE {self.__node.identifier!r} | Received payload with invalid operation code: {data}')
continue
else:
__log__.debug(f'NODE {self.__node.identifier!r} | Received payload with op-code {op!r}: {data}')
self._loop.create_task(self.__node.handle_ws_response(op, data['d']))
def send_str(self, data: str, compress: Optional[int] = None) -> Coroutine[None, None, None]:
return self.__ws.send_str(data, compress)
``` |
{
"source": "jottew/Rodo",
"score": 2
} |
#### File: Rodo/cogs/events.py
```python
import defectio
import utils
import core
import re
from core.bot import Rodo
from defectio.ext import commands
from core.context import Context
class Events(core.Cog):
def __init__(self, bot):
self.bot: Rodo = bot
self.BadArgumentRegex = re.compile(r"Converting to \"(.+)\" failed for parameter \"(.+)\"\.")
self.BadArgumentRegex2 = re.compile(r"with base \d+: \'?([^\']+)\'?")
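        # For reference, the error strings these patterns are written against
        # (example texts; the first is discord.py's BadArgument wording):
        #   'Converting to "int" failed for parameter "amount".'  -> ("int", "amount")
        #   "invalid literal for int() with base 10: 'abc'"       -> "abc"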
@commands.Cog.listener(name="on_command_error")
async def on_command_error(self, ctx: Context, error: Exception):
if isinstance(error, commands.CommandInvokeError):
error = error.original
if ctx.command is not None:
if hasattr(ctx.command, "on_error") or hasattr(ctx.bot.get_cog(ctx.command.cog_name), "on_error"):
return
if isinstance(error, commands.CommandOnCooldown):
if await ctx.bot.is_owner(ctx.author):
return await ctx.reinvoke()
return await ctx.send(f"❌ Try again in {utils.time(int(error.retry_after))}")
if isinstance(error, commands.MissingRequiredArgument):
param = str(error.param).split(":")[0].replace(" ", "")
return await ctx.send(f"❌ **{param}** is a required argument that is missing")
if isinstance(error, commands.MaxConcurrencyReached):
return await ctx.send(f"❌ {error.args[0]}")
if isinstance(error, commands.MissingRequiredFlag):
return await ctx.send(f"❌ **{error.flag.name}** is a required flag that is missing")
if isinstance(error, commands.BadArgument):
matches = self.BadArgumentRegex.findall(str(error))[0]
type_ = matches[0]
param = matches[1]
actual = self.BadArgumentRegex2.findall(str(error.__cause__))[0]
types = {
"int": "number",
"float": "number"
}
type_ = types.get(type_, type_)
return await ctx.send(f"❌ Expected a {type_} for {param} parameter, but got `{actual}`")
if isinstance(error, commands.MemberNotFound):
return await ctx.send("❌ That member was not found")
if isinstance(error, commands.BadUnionArgument):
return await ctx.send("❌ Invalid parameters passed")
if isinstance(error, commands.MissingPermissions):
if await ctx.bot.is_owner(ctx.author):
return await ctx.reinvoke()
perms = utils.format_list(error.missing_permissions, seperator="and", brackets="`")
return await ctx.send(f"❌ You are missing {perms} permissions")
if isinstance(error, commands.BotMissingPermissions):
perms = utils.format_list(error.missing_permissions, seperator="and", brackets="`")
return await ctx.send(f"❌ I am missing {perms} permissions")
if isinstance(error, commands.NotOwner):
return await ctx.send(f"❌ Only my owner can do that >:(")
if isinstance(
error, (
commands.CheckAnyFailure,
commands.CheckFailure,
commands.CommandNotFound)):
return
        await ctx.send(
            f"Some weird error occurred, you can send it to my developer if you want\n```py\n{error.__class__.__name__}: {str(error)}\n```"
        )
raise error
def setup(bot):
bot.add_cog(Events(bot))
```
#### File: Rodo/utils/converters.py
```python
import re
MENTION_REGEX = re.compile(r"<@(!?)([0-9]*)>")
def iter_dict(d: dict):
    # Flatten a nested dict by walking it with an explicit stack; nested
    # dicts are pushed back onto the stack so every level is visited.
    stack = list(d.items())
    visited = {}
    while stack:
        k, v = stack.pop()
        if isinstance(v, dict):
            stack.extend(v.items())
        else:
            visited[k] = v
    return visited
def format_list(items: list, seperator: str = "or", brackets: str = ""):
if len(items) < 2:
return f"{brackets}{items[0]}{brackets}"
new_items = []
for i in items:
if not re.match(MENTION_REGEX, i):
new_items.append(f"{brackets}{i}{brackets}")
else:
new_items.append(i)
msg = ", ".join(list(new_items)[:-1]) + f" {seperator} " + list(new_items)[-1]
return msg
def time(miliseconds: int, seconds: bool = False, *args, **kwargs):
    if seconds:
        miliseconds = miliseconds * 1000
    if not isinstance(miliseconds, int):
        raise TypeError(f"argument \"miliseconds\" requires int, not {miliseconds.__class__.__name__}")
    # Split the total into days/hours/minutes/seconds/ms with divmod
    # instead of repeated subtraction loops.
    seconds, miliseconds = divmod(miliseconds, 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
if days > 0:
time = [f"{days}d", f"{hours}h", f"{minutes}m", f"{seconds}s"]
elif hours > 0:
time = [f"{hours}h", f"{minutes}m", f"{seconds}s"]
elif minutes > 0:
time = [f"{minutes}m", f"{seconds}s"]
elif seconds > 0:
time = [f"{seconds}s"]
else:
time = [f"{miliseconds}ms"]
result = []
for v in time:
if not str(v).startswith("0"):
result.append(v)
if not result:
result = ["0ms"]
seperator = kwargs.pop("seperator", "and")
return format_list(result, seperator, *args, **kwargs)
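# Worked example: time(90061000) -> "1d, 1h, 1m and 1s"
# (90,061,000 ms = 1 day + 1 hour + 1 minute + 1 second exactly)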
``` |
{
"source": "jottew/smol-shibe",
"score": 2
} |
#### File: smol-shibe/utils/checks.py
```python
from discord.ext import commands
def is_general():
def predicate(ctx):
channel = 845243855648456734
if ctx.channel.id != channel:
raise commands.CheckFailure(f"This command is only for <#{channel}>")
return True
return commands.check(predicate)
```
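A sketch of attaching the check above to a command; bot setup details are elided, and newer discord.py versions may also require an `intents` argument:
```python
from discord.ext import commands
from utils.checks import is_general
bot = commands.Bot(command_prefix='!')
@bot.command()
@is_general()
async def ping(ctx):
    # Runs only in the hard-coded general channel; elsewhere the
    # CheckFailure raised by the predicate surfaces to the error handler.
    await ctx.send('pong')
```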
#### File: smol-shibe/utils/context.py
```python
from discord.ext import commands
class Context(commands.Context):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.color = int(self.bot.config["COLOR"], 16)
async def send(self, content: str = None, reply: bool = True, *args, **kwargs):
if content is not None:
if len(content) > 2000:
content = str(await self.bot.myst.post(content))
if reply:
return await super().reply(content, *args, **kwargs)
return await super().send(content, *args, **kwargs)
```
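One way to make the bot hand out this subclass is overriding `get_context`, a standard discord.py hook; the sketch assumes the bot also exposes the `config` mapping and `myst` client that `Context` relies on:
```python
from discord.ext import commands
from utils.context import Context
class ShibeBot(commands.Bot):  # hypothetical bot class name
    async def get_context(self, message, *, cls=Context):
        # Every command invocation now receives the custom Context above.
        return await super().get_context(message, cls=cls)
```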
#### File: smol-shibe/utils/converters.py
```python
import re
MENTION_REGEX = re.compile(r"<@(!?)([0-9]*)>")
def format_list(items: list, seperator: str = "or", brackets: str = ""):
if len(items) < 2:
return f"{brackets}{items[0]}{brackets}"
new_items = []
for i in items:
if not re.match(MENTION_REGEX, i):
new_items.append(f"{brackets}{i}{brackets}")
else:
new_items.append(i)
msg = ", ".join(list(new_items)[:-1]) + f" {seperator} " + list(new_items)[-1]
return msg
def time(miliseconds: int, seconds: bool = False):
    if seconds:
        miliseconds = miliseconds * 1000
    if not isinstance(miliseconds, int):
        raise TypeError(f"argument \"miliseconds\" requires int, not {miliseconds.__class__.__name__}")
    # Split the total into days/hours/minutes/seconds/ms with divmod
    # instead of repeated subtraction loops.
    seconds, miliseconds = divmod(miliseconds, 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
if days > 0:
time = [f"{days}d", f"{hours}h", f"{minutes}m", f"{seconds}s", f"{miliseconds}ms"]
elif hours > 0:
time = [f"{hours}h", f"{minutes}m", f"{seconds}s", f"{miliseconds}ms"]
elif minutes > 0:
time = [f"{minutes}m", f"{seconds}s", f"{miliseconds}ms"]
elif seconds > 0:
time = [f"{seconds}s", f"{miliseconds}ms"]
else:
time = [f"{miliseconds}ms"]
    result = []
    for v in time:
        if not str(v).startswith("0"):
            result.append(v)
    if not result:
        result = ["0ms"]
    return format_list(result, seperator="and", brackets="`")
```
#### File: smol-shibe/utils/executor.py
```python
import functools
import asyncio
def executor(executor=None):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
partial = functools.partial(func, *args, **kwargs)
loop = asyncio.get_running_loop()
return loop.run_in_executor(executor, partial)
return wrapper
return decorator
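# Example usage (a sketch): push a blocking call onto the default thread pool.
@executor()
def blocking_work(duration: float) -> float:
    import time
    time.sleep(duration)
    return duration
# Inside a coroutine:  result = await blocking_work(1.0)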
``` |
{
"source": "JottoWorol/devops",
"score": 3
} |
#### File: JottoWorol/devops/AppTest.py
```python
import datetime
import unittest
import app
class AppTest(unittest.TestCase):
def test_correct_time(self):
self.assertEqual(app.get_current_time(),
str(datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=3))).strftime("%Y-%m-%dT%H:%M:%S")))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jottr/mkdocs-mermaid-plugin",
"score": 2
} |
#### File: jottr/mkdocs-mermaid-plugin/setup.py
```python
from setuptools import setup, find_packages
def readme():
"""print long description"""
with open('README.md') as f:
return f.read()
long_description = (
    "This is a MkDocs plugin that enables mermaid graphs in markdown files. "
    "Please follow the instructions in the README to enable this plugin."
)
setup(
name='mkdocs-mermaid-plugin',
version='0.1.1',
    description='A MkDocs plugin that supports mermaid graphs in markdown files',
long_description=long_description,
keywords='mkdocs python markdown mermaid',
url='https://github.com/pugong/mkdocs-mermaid-plugin',
author='pugong',
author_email='<EMAIL>',
license='MIT',
python_requires='>=2.7',
install_requires=[
'mkdocs>=0.17'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
packages=find_packages(exclude=['*.tests']),
entry_points={
'mkdocs.plugins': [
'markdownmermaid = markdownmermaid.plugin:MarkdownMermaidPlugin'
]
}
)
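# After `pip install .`, enable the plugin from mkdocs.yml via the entry-point
# name registered above (a minimal sketch):
#   plugins:
#     - markdownmermaid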
``` |
{
"source": "jottyVlad/EnjAlice",
"score": 2
} |
#### File: EnjAlice/example/blueprints_example.py
```python
from aiohttp import web
from blueprints import bps, bp as full_bp # noqa
from enjalice.routers import Dispatcher
from enjalice.request import AliceRequest
from enjalice.response import text
from example.aiohttp_server import app
async def start_handler(_: AliceRequest):
return text(
msg="Это пример работы с blueprints"
)
dp = Dispatcher(start_handler)
for bp in bps:
dp.register_blueprint(bp)
# Or
# dp.register_blueprint(full_bp)
if __name__ == '__main__':
app['dp'] = dp
web.run_app(app, host="127.0.0.1", port=8888)
```
#### File: EnjAlice/tests/test_intent_handler.py
```python
from random import randint
from secrets import token_hex
import pytest
from enjalice.intent_handler import IntentHandler, IntentHandlersCollection
from enjalice.response import AliceResponse
COLLECTION_SIZE = 16
@pytest.fixture
def collection():
c = IntentHandlersCollection()
def handler():
return AliceResponse()
for i in range(COLLECTION_SIZE):
c.add(
IntentHandler(
handler=handler,
name=f"dummy_{token_hex(8)}",
priority=randint(0, 100)
)
)
return c
def test_order(collection: IntentHandlersCollection):
last_priority = float("inf")
for handler in collection:
assert last_priority >= handler.priority,\
"Order in IntentHandlersCollection is BROKEN"
last_priority = handler.priority
def test_contains(collection: IntentHandlersCollection):
for i in collection:
assert i in collection,\
"__contains__ method of IntentHandlersCollection is BROKEN"
def test_discard(collection: IntentHandlersCollection):
for i in collection:
collection.discard(i)
assert i not in collection,\
"discard method of IntentHandlersCollection is BROKEN"
def test_len(collection: IntentHandlersCollection):
assert len(collection) == COLLECTION_SIZE
``` |
{
"source": "jotunskij/flaskan",
"score": 2
} |
#### File: flaskan/routes/api.py
```python
from functools import wraps
from util import group_required_api, log_in_user, TOKEN_BLACKLIST
from flask import (
request, jsonify, Blueprint
)
from flask_jwt_extended import (
jwt_required, create_access_token,
get_jwt_identity, create_refresh_token,
jwt_refresh_token_required,
set_access_cookies, set_refresh_cookies,
get_raw_jwt
)
from flask_cors import cross_origin
api_routes = Blueprint('api_routes', __name__)
@api_routes.route('/api/login', methods=['POST'])
@cross_origin(headers=['Content-Type', 'Authorization'],
supports_credentials=True)
def api_login():
if not request.is_json:
return jsonify({"error": "Missing JSON in request"}), 400
username = request.json.get('username', None)
password = request.json.get('password', None)
if not username:
return jsonify({"error": "Missing username parameter"}), 400
if not password:
return jsonify({"error": "Missing password parameter"}), 400
user = log_in_user(username, password)
if not user:
return jsonify({"error": f"Bad username and/or password"}), 400
identity = {
'user_id': user['user_id'],
'groups': user['groups']
}
response = jsonify({'loggedIn': True})
access_token = create_access_token(identity)
refresh_token = create_refresh_token(identity)
set_access_cookies(response, access_token)
set_refresh_cookies(response, refresh_token)
return response, 200
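# Example login request against a local dev server (hypothetical credentials):
#   curl -X POST http://localhost:5000/api/login \
#        -H 'Content-Type: application/json' \
#        -d '{"username": "alice", "password": "secret"}'
# On success the access and refresh JWTs are set as cookies on the response.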
@api_routes.route('/api/token/refresh', methods=['POST'])
@cross_origin(headers=['Content-Type', 'Authorization'],
supports_credentials=True)
@jwt_refresh_token_required
def api_refresh_token():
identity = get_jwt_identity()
response = jsonify({'refreshed': True})
access_token = create_access_token(identity=identity)
set_access_cookies(response, access_token)
return response, 200
# Endpoint for revoking the current users access token
@api_routes.route('/api/logout', methods=['DELETE'])
@cross_origin(headers=['Content-Type', 'Authorization'],
supports_credentials=True)
@jwt_required
def logout():
jti = get_raw_jwt()['jti']
TOKEN_BLACKLIST.add(jti)
return jsonify({"msg": "Successfully logged out"}), 200
# Endpoint for revoking the current users refresh token
@api_routes.route('/api/logout-refresh', methods=['DELETE'])
@cross_origin(headers=['Content-Type', 'Authorization'],
supports_credentials=True)
@jwt_refresh_token_required
def logout_refresh():
jti = get_raw_jwt()['jti']
TOKEN_BLACKLIST.add(jti)
return jsonify({"msg": "Successfully logged out"}), 200
@api_routes.route('/api/admin', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'],
supports_credentials=True)
@jwt_required
@group_required_api(group='admin')
def api_admin():
return jsonify({'data': 'admin'}), 200
@api_routes.route('/api/user', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'],
supports_credentials=True)
@jwt_required
def api_user():
response = jsonify({'data': 'user'})
return response, 200
```
#### File: flaskan/routes/public.py
```python
from flask import (
render_template, send_from_directory,
Blueprint, request, redirect, url_for,
make_response
)
from flask_jwt_extended import (
set_access_cookies, set_refresh_cookies,
unset_jwt_cookies, jwt_optional,
get_raw_jwt, get_jwt_identity,
create_access_token, create_refresh_token,
jwt_refresh_token_required, verify_jwt_in_request
)
import requests
import util
public_routes = Blueprint('public_routes', __name__)
@public_routes.route('/static/<path:path>')
def get_js(path):
return send_from_directory('static', path)
@public_routes.route('/', methods=['GET'])
def index():
return render_template('public/index.html')
@public_routes.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('public/login.html')
else:
        next_url = '.index'
        if request.form.get('next_url'):
            next_url = request.form['next_url']
api_response = util.api_login()
response = make_response(redirect(url_for(next_url)))
for name, value in api_response.cookies.items():
response.set_cookie(name, value)
return response
@public_routes.route('/logout', methods=['GET'])
def logout():
try:
verify_jwt_in_request()
except Exception as ex:
print(ex)
util.api_logout((get_raw_jwt() or {}).get("csrf"))
response = make_response(redirect(url_for('public_routes.logout_refresh')))
return response
@public_routes.route('/logout-refresh', methods=['GET'])
@jwt_refresh_token_required
def logout_refresh():
util.api_logout_refresh((get_raw_jwt() or {}).get("csrf"))
response = make_response(render_template(
'public/login.html', error='Du är nu utloggad'
))
unset_jwt_cookies(response)
return response
``` |
{
"source": "joturako/GearVRf-docs",
"score": 3
} |
#### File: joturako/GearVRf-docs/publish.py
```python
import os
import shutil
import subprocess
import glob
import argparse
def get_curr_path():
return os.path.dirname(os.path.realpath(__file__))
def which(program):
def is_exe(cmd_path):
return os.path.exists(cmd_path) and os.access(cmd_path, os.X_OK)
def ext_candidates(cmd_path):
yield cmd_path
for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
yield cmd_path + ext
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
for candidate in ext_candidates(exe_file):
if is_exe(candidate):
return candidate
return None
def copy_all(src, dst):
for filename in glob.glob(os.path.join(src, '*.*')):
shutil.copy(filename, dst)
def copy_tree(src, dst):
if os.path.isdir(src):
shutil.copytree(src, dst)
elif os.path.exists(src):
shutil.copy(src, dst)
def del_tree(path):
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.remove(path)
else:
        print('Invalid path: ' + path)
def update_version(file_path, version_num):
    from string import Template
    # Read the file, substitute the version placeholder, and write it back,
    # closing each handle via a context manager.
    with open(file_path, 'r') as fh:
        html_content = fh.read()
    replaced_content = Template(html_content).substitute(gvrf_version=version_num)
    with open(file_path, 'w') as fh:
        fh.write(replaced_content)
def gen_javadoc(src_path, out_path, package_name):
cmd = ['javadoc', '-Xdoclint:none']
cmd.extend(['-d', out_path])
cmd.extend(['-sourcepath', src_path])
cmd.extend(['-subpackages', package_name])
cmd.extend(['-encoding', 'UTF-8'])
cmd.extend(['-charset', 'UTF-8'])
cmd.append('-quiet')
subprocess.call(cmd)
def update_template(out_path, version_num):
curr_path = get_curr_path()
full_out_path = os.path.join(curr_path, out_path)
version_out_path = os.path.join(full_out_path, version_num)
index2_path = os.path.join(full_out_path, 'index2.html')
update_version(index2_path, version_num)
java_doc_index_path = os.path.join(version_out_path, 'index.html')
update_version(java_doc_index_path, version_num)
def gen_java_docs(base_path, out_path):
del_tree(out_path)
# Generate frameworks
sub_out_path = os.path.join(out_path, 'Framework')
src_path = os.path.join(base_path, 'GVRF', 'Framework', 'framework', 'src', 'main', 'java')
gen_javadoc(src_path, sub_out_path, 'org.gearvrf')
# Generate 3DCursor
sub_out_path = os.path.join(out_path, '3DCursor')
src_path = os.path.join(base_path, 'GVRf', 'Extensions', '3DCursor', '3DCursorLibrary', 'src', 'main', 'java')
gen_javadoc(src_path, sub_out_path, 'org.gearvrf')
# Generate DebugWebServer
sub_out_path = os.path.join(out_path, 'DebugWebServer')
src_path = os.path.join(base_path, 'GVRf', 'Extensions', 'DebugWebServer', 'debugwebserver', 'src', 'main', 'java')
gen_javadoc(src_path, sub_out_path, 'smcl.samsung')
# Generate SceneSerializer
sub_out_path = os.path.join(out_path, 'SceneSerializer')
src_path = os.path.join(base_path, 'GVRf', 'Extensions', 'SceneSerializer', 'sceneserializer', 'src', 'main', 'java')
gen_javadoc(src_path, sub_out_path, 'org.gearvrf')
# Generate WidgetPlugin
sub_out_path = os.path.join(out_path, 'WidgetPlugin')
src_path = os.path.join(base_path, 'GVRf', 'Extensions', 'WidgetPlugin', 'widgetplugin', 'src', 'main', 'java')
gen_javadoc(src_path, sub_out_path, 'org.gearvrf')
# Generate gvrf-physics
sub_out_path = os.path.join(out_path, 'gvrf-physics')
src_path = os.path.join(base_path, 'GVRf', 'Extensions', 'gvrf-physics', 'src', 'main', 'java')
gen_javadoc(src_path, sub_out_path, 'org.gearvrf')
# Generate particle system
sub_out_path = os.path.join(out_path, 'gvrf-particlesystem')
src_path = os.path.join(base_path, 'GVRf', 'Extensions', 'gvrf-particlesystem', 'src', 'main', 'java')
gen_javadoc(src_path, sub_out_path, 'org.gearvrf')
def gen_all_docs(out_path, api_template_path, version_num):
# Check required commands
javadoc_path = which('javadoc')
if javadoc_path is None:
        print('==> Error: Failed to find javadoc, please check your java setup')
return
mkdocs_path = which('mkdocs')
if mkdocs_path is None:
        print('==> Error: Failed to find mkdocs, please follow the Readme to set it up')
return
del_tree(out_path)
copy_tree(api_template_path, out_path)
    # Locate the GearVRf sources: try the GVRF_SOURCE_PATH environment
    # variable first, falling back to a sibling 'GearVRf' checkout below.
gvrf_path = os.environ.get('GVRF_SOURCE_PATH')
curr_path = get_curr_path()
full_out_path = os.path.join(curr_path, out_path, version_num)
template_path = os.path.join(curr_path, api_template_path, 'template')
print "==> Setting up environment"
if gvrf_path is None:
# Search at the parent dir
parent_path = os.path.dirname(curr_path)
gvrf_path = os.path.join(parent_path, 'GearVRf')
# Generate all java docs
    print('==> generate javadoc')
if os.path.isdir(gvrf_path):
gen_java_docs(gvrf_path, full_out_path)
else:
print "==> Invalid GVRF path: " + gvrf_path
# copy template
    print('==> copy template')
copy_all(template_path, full_out_path)
# Update versions in template
update_template(out_path, version_num)
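# Typical invocations (from the repository root):
#   python publish.py -v 3.3
#   python publish.py -v 3.3 -deploy github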
def main():
parser = argparse.ArgumentParser(description='Generate the documentation site for GearVR Framework')
parser.add_argument('-v', metavar='Version', dest='version', help='specify GVRF version', default='v3.3')
parser.add_argument('-deploy', metavar='Deploy', dest='deploy', help='specify deploy target: github')
args = parser.parse_args()
if not args.version.startswith('v'):
args.version = 'v' + args.version
    print('=> GVRF version: ' + args.version)
    # Generate site with mkdocs
    print('=> Generating Documentation site')
    from subprocess import call
    call(['mkdocs', 'build'])
# Generate API reference from GVRF source
    print('=> Generating API reference site')
gen_all_docs('temp', 'api_reference', args.version)
# Copy api_reference and replace the placeholder api_reference in site
    print('=> Merging API reference with documentation')
if os.path.isdir('site'):
del_tree('site/api_reference')
copy_tree('temp', 'site/api_reference')
        print('==> Add API reference')
else:
        print('=> Error: Failed to find site directory, please make sure mkdocs is set up correctly')
return
if args.deploy == 'github':
        print('=> Deploy to github')
from deploy import gh_deploy
gh_deploy()
if __name__ == "__main__":
    main()
``` |
{
"source": "jotzt/node-management",
"score": 3
} |
#### File: jotzt/node-management/inventory.py
```python
import urllib.request
import json
import ipaddress
import argparse
def output_list_inventory(json_output):
'''
Output the --list data structure as JSON
'''
print(json.dumps(json_output))
def find_host(search_host, inventory):
'''
Find the given variables for the given host and output them as JSON
'''
host_attribs = inventory.get(search_host, {})
print(json.dumps(host_attribs))
def get_nodes():
json_url = "http://map.freifunk-duesseldorf.de/nodes.json"
    response = urllib.request.urlopen(json_url)
nodes = json.loads(response.read().decode("utf-8"))['nodes']
return nodes
def build_inventory():
hosts = []
    for node in get_nodes().values():
ip = get_ips_from_node(node)
if ip is not False:
hosts.append(ip)
return {
"nodes": {
"hosts": hosts,
"vars": {}
}
}
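# Sample `--list` output (illustrative addresses):
#   {"nodes": {"hosts": ["203.0.113.5", "198.51.100.7"], "vars": {}}}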
def get_global_ip(ip_adresses):
ip = None
for address in ip_adresses:
if ipaddress.ip_address(address).is_global:
ip = address
if ip is None:
return False
return ip
def get_ips_from_node(node):
if 'addresses' not in node['nodeinfo']['network']:
return False
return get_global_ip(node['nodeinfo']['network']['addresses'])
def main():
'''
Ansible dynamic inventory experimentation
Output dynamic inventory as JSON from statically defined data structures
'''
# Argument parsing
parser = argparse.ArgumentParser(description="Ansible dynamic inventory")
parser.add_argument("--list",
help="Ansible inventory of all of the groups",
action="store_true", dest="list_inventory")
parser.add_argument("--host",
help="Ansible inventory of a particular host",
action="store",
dest="ansible_host", type=str)
cli_args = parser.parse_args()
list_inventory = cli_args.list_inventory
ansible_host = cli_args.ansible_host
if list_inventory:
ANSIBLE_INV = build_inventory()
output_list_inventory(ANSIBLE_INV)
if ansible_host:
print("{}")
if __name__ == "__main__":
main()
``` |
{
"source": "Jou0us/NumCpp",
"score": 2
} |
#### File: test/pytest/test_functions.py
```python
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
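# e.g. factors(12) == {1, 2, 3, 4, 6, 12}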
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
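# append: Axis.NONE flattens both inputs; Axis.ROW requires matching column
# counts and Axis.COL matching row counts, mirroring np.append with axis=0/1.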
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
numCols = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
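# arange: the step sign is flipped whenever stop < start so the interval is
# traversed in the correct direction.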
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
    step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
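# argmax: numpy orders complex values lexicographically (real part first, then
# imaginary); the asserts below rely on NumCpp agreeing with that ordering.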
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
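# argsort: indices are compared by gathering the values they select rather than
# element-wise, since the ordering of equal elements is implementation-defined
# and may legitimately differ between NumCpp and numpy.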
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
    if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
        allPass = False
        break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
    if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
        allPass = False
        break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
    if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):  # noqa
        allPass = False
        break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
    if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
        allPass = False
        break
assert allPass
####################################################################################
def test_argwhere():
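# argwhere: the numpy reference is computed on the flattened mask so that both
# sides yield flat (1D) indices.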
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
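# array_equiv: arrays holding the same values count as equivalent even when
# their shapes differ (cArray2 is a reshape of cArray1's data); an array of
# different random values does not.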
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
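# asarray: exercises the asarray factory overloads exposed by the bindings
# (Array/Vector/Deque/List/Iterators/Pointer variants, including the copy and
# take-ownership versions), for both double and complex double element types.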
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
shapeInput = np.random.randint(20, 100, [2, ])
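# astype: covers double→uint32, double→complex128, complex128→complex64 and
# complex128→double conversions, checking both the converted values and dtype.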
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
warnings.filterwarnings('ignore', category=np.ComplexWarning)
assert np.array_equal(cArrayCast, data.astype(np.double))
warnings.filters.pop() # noqa
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
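# averageWeighted: the weight vector spans the axis being averaged over (one
# weight per row for Axis.ROW, one per column for Axis.COL).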
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
def test_bincount():
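# bincount: checked with minlength=0 and with a minlength beyond the data
# maximum, both with and without per-element weights.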
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
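# byteswap: only the shape of the returned array is verified against the input
# shape; the swapped byte values themselves are not compared to a numpy reference.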
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
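# centerOfMass: validated against scipy.ndimage's center_of_mass (referenced
# here through the meas alias); the ROW/COL cases reduce each column/row to a
# scalar center of mass.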
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for col in range(data.shape[1]):
    coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
    coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
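# contains: Axis.COL asks whether each row contains the value and Axis.ROW
# whether each column does (note the iteration over data.T in the ROW cases).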
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
    truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
    truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
    truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
    truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
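# test_copysign: element-wise sign transfer from the second array to the first, compared against np.copysign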
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
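# test_copyto: copies one array's contents into another and compares the result with the source data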
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
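# test_cross: cross products of 2D and 3D vectors, for single vector pairs and for batches along each axis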
def test_cross():
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
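# test_cube: element-wise cube, compared against data * data * data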
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
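# test_cumprod: cumulative product; shapes and values are kept small so the running product stays in range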
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
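# test_deleteIndices: deletion by slice and by scalar index, compared against np.delete along each axis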
def test_deleteIndices():
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
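# test_diag: extracts the k-th diagonal of a matrix, compared against np.diag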
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
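# test_diagflat: builds a 2D array with the input elements on the k-th diagonal, compared against np.diagflat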
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
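# test_diagonal: Axis.ROW matches np.diagonal with axis1=0/axis2=1; Axis.COL swaps the two axes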
def test_diagonal():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
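# test_divide: array/array, array/scalar, and scalar/array division over real, complex, and mixed operands,
# with zero divisors replaced or re-drawn beforehand to avoid division by zero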
def test_divide():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
####################################################################################
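# test_dot: inner products of 1D vectors (real, complex, and mixed) and 2D matrix multiplication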
def test_dot():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
####################################################################################
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
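# test_endianess: a freshly constructed array reports native endianness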
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
def test_exp2():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_exp():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
def test_eye():
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
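# test_find: flattened indices of the true elements of a boolean mask, compared against np.nonzero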
def test_find():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
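# test_findN: like find, but returns only the first N matching indices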
def test_findN():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
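# test_fix: rounds toward zero, compared against np.fix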
def test_fix():
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.floor_divideScaler(value1, value2) == np.floor_divide(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.floor_divideArray(cArray1, cArray2), np.floor_divide(data1, data2))
####################################################################################
def test_fmax():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fmaxScaler(value1, value2) == np.fmax(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmaxArray(cArray1, cArray2), np.fmax(data1, data2))
####################################################################################
def test_fmin():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fminScaler(value1, value2) == np.fmin(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fminArray(cArray1, cArray2), np.fmin(data1, data2))
####################################################################################
def test_fmod():
value1 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
value2 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
assert NumCpp.fmodScaler(value1, value2) == np.fmod(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmodArray(cArray1, cArray2), np.fmod(data1, data2))
####################################################################################
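# test_fromfile: round-trips an array through a binary dump and a newline-delimited text file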
def test_fromfile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.fromfile(tempFile, '').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump')
NumCpp.tofile(cArray, tempFile, '\n')
assert os.path.exists(tempFile + '.txt')
data2 = NumCpp.fromfile(tempFile + '.txt', '\n').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile + '.txt')
####################################################################################
def test_fromiter():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
####################################################################################
def test_full():
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquare(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquareComplex(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowCol(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowColComplex(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
####################################################################################
def test_full_like():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
value = np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_like(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_likeComplex(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
####################################################################################
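# test_gcd: the scalar version is gated on Boost or STL gcd/lcm support; the array reduction requires Boost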
def test_gcd():
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.gcdScaler(value1, value2) == np.gcd(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(20, 100, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 1000, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.gcdArray(cArray) == np.gcd.reduce(data) # noqa
####################################################################################
def test_gradient():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
####################################################################################
def test_greater():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
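    # NumPy orders complex values lexicographically (real part first, then imaginary), so the same data works here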
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
####################################################################################
def test_greater_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
####################################################################################
def test_histogram():
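    # overload 1: histogram given a bin count; compare both the counts and the computed bin edges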
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numBins = np.random.randint(10, 30, [1, ]).item()
histogram, bins = NumCpp.histogram(cArray, numBins)
h, b = np.histogram(data, numBins)
assert np.array_equal(histogram.getNumpyArray().flatten().astype(np.int32), h)
assert np.array_equal(np.round(bins.getNumpyArray().flatten(), 9), np.round(b, 9))
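    # overload 2: histogram given explicit, pre-computed bin edges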
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
binEdges = np.linspace(data.min(), data.max(), 15, endpoint=True)
cBinEdges = NumCpp.NdArray(1, binEdges.size)
cBinEdges.setArray(binEdges)
histogram = NumCpp.histogram(cArray, cBinEdges)
h, _ = np.histogram(data, binEdges)
assert np.array_equal(histogram.flatten().astype(np.int32), h)
####################################################################################
def test_hstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.hstack(cArray1, cArray2, cArray3, cArray4),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_hypot():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.hypotScaler(value1, value2) == np.hypot(value1, value2)
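    # np.hypot only takes two arguments, so check the three-argument overload against the explicit formula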
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
value3 = np.random.randn(1).item() * 100 + 1000
assert (np.round(NumCpp.hypotScalerTriple(value1, value2, value3), 9) ==
np.round(np.sqrt(value1**2 + value2**2 + value3**2), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.hypotArray(cArray1, cArray2), 9),
np.round(np.hypot(data1, data2), 9))
####################################################################################
def test_identity():
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identity(squareSize).getNumpyArray(), np.identity(squareSize))
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identityComplex(squareSize).getNumpyArray(),
np.identity(squareSize) + 1j * np.zeros([squareSize, squareSize]))
####################################################################################
def test_imag():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.imagScaler(value), 9) == np.round(np.imag(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.imagArray(cArray), 9), np.round(np.imag(data), 9))
####################################################################################
def test_interp():
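    # sample sin(x) coarsely, then linearly interpolate onto a finer grid and compare with np.interp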
endPoint = np.random.randint(10, 20, [1, ]).item()
numPoints = np.random.randint(50, 100, [1, ]).item()
resample = np.random.randint(2, 5, [1, ]).item()
xpData = np.linspace(0, endPoint, numPoints, endpoint=True)
fpData = np.sin(xpData)
xData = np.linspace(0, endPoint, numPoints * resample, endpoint=True)
cXp = NumCpp.NdArray(1, numPoints)
cFp = NumCpp.NdArray(1, numPoints)
cX = NumCpp.NdArray(1, numPoints * resample)
cXp.setArray(xpData)
cFp.setArray(fpData)
cX.setArray(xData)
assert np.array_equal(np.round(NumCpp.interp(cX, cXp, cFp).flatten(), 9),
np.round(np.interp(xData, xpData, fpData), 9))
####################################################################################
def test_intersect1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.intersect1d(cArray1, cArray2).getNumpyArray().flatten(), np.intersect1d(data1, data2))
####################################################################################
def test_invert():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.invert(cArray).getNumpyArray(), np.invert(data))
####################################################################################
def test_isclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols)
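    # perturb data1 with noise on the order of rtol so the comparison exercises both sides of the tolerance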
data2 = data1 + np.random.randn(shape.rows, shape.cols) * 1e-5
cArray1.setArray(data1)
cArray2.setArray(data2)
rtol = 1e-5
atol = 1e-8
assert np.array_equal(NumCpp.isclose(cArray1, cArray2, rtol, atol).getNumpyArray(),
np.isclose(data1, data2, rtol=rtol, atol=atol))
####################################################################################
def test_isinf():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isinfScaler(value) == np.isinf(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.inf
cArray.setArray(data)
assert np.array_equal(NumCpp.isinfArray(cArray), np.isinf(data))
####################################################################################
def test_isnan():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isnanScaler(value) == np.isnan(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.nan
cArray.setArray(data)
assert np.array_equal(NumCpp.isnanArray(cArray), np.isnan(data))
####################################################################################
def test_lcm():
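    # lcmScaler is available with either Boost or C++17's std::lcm; the array reduction overload is Boost-only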
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.lcmScaler(value1, value2) == np.lcm(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(2, 10, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 100, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.lcmArray(cArray) == np.lcm.reduce(data) # noqa
####################################################################################
def test_ldexp():
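    # ldexp(x, n) computes x * 2**n; results are rounded to 9 decimal places before comparison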
value1 = np.random.randn(1).item() * 100
value2 = np.random.randint(1, 20, [1, ]).item()
assert np.round(NumCpp.ldexpScaler(value1, value2), 9) == np.round(np.ldexp(value1, value2), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayUInt8(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100
data2 = np.random.randint(1, 20, [shape.rows, shape.cols], dtype=np.uint8)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.ldexpArray(cArray1, cArray2), 9), np.round(np.ldexp(data1, data2), 9))
####################################################################################
def test_left_shift():
shapeInput = np.random.randint(20, 100, [2, ])
    bitsToShift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
    assert np.array_equal(NumCpp.left_shift(cArray, bitsToShift).getNumpyArray(),
                          np.left_shift(data, bitsToShift))
####################################################################################
def test_less():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
####################################################################################
def test_less_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
####################################################################################
def test_load():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.load(tempFile).reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
####################################################################################
def test_linspace():
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, True).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=True), 9))
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, False).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=False), 9))
####################################################################################
def test_log():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
####################################################################################
def test_log10():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
components = np.random.randn(2).astype(np.double) * 100 + 100
value = complex(components[0], components[1])
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
####################################################################################
def test_log1p():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log1pScaler(value), 9) == np.round(np.log1p(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log1pArray(cArray), 9), np.round(np.log1p(data), 9))
####################################################################################
def test_log2():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log2Scaler(value), 9) == np.round(np.log2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log2Array(cArray), 9), np.round(np.log2(data), 9))
####################################################################################
def test_logical_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_and(cArray1, cArray2).getNumpyArray(), np.logical_and(data1, data2))
####################################################################################
def test_logical_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.logical_not(cArray).getNumpyArray(), np.logical_not(data))
####################################################################################
def test_logical_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_or(cArray1, cArray2).getNumpyArray(), np.logical_or(data1, data2))
####################################################################################
def test_logical_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_xor(cArray1, cArray2).getNumpyArray(), np.logical_xor(data1, data2))
####################################################################################
def test_matmul():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArray(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
####################################################################################
def test_max():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
####################################################################################
def test_maximum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
####################################################################################
def test_mean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
####################################################################################
def test_median():
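    # re-draw shapes until the total element count is odd, so the median is a single element
    # rather than the average of the two middle values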
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.median(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() == np.median(data, axis=None).item()
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.median(data, axis=0))
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.median(data, axis=1))
####################################################################################
def test_meshgrid():
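    # build the i and j ranges as NumCpp Slices and check the resulting grids against np.meshgrid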
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataI = np.arange(start, end, step)
iSlice = NumCpp.Slice(start, end, step)
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataJ = np.arange(start, end, step)
jSlice = NumCpp.Slice(start, end, step)
iMesh, jMesh = np.meshgrid(dataI, dataJ)
iMeshC, jMeshC = NumCpp.meshgrid(iSlice, jSlice)
assert np.array_equal(iMeshC.getNumpyArray(), iMesh)
assert np.array_equal(jMeshC.getNumpyArray(), jMesh)
####################################################################################
def test_min():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
####################################################################################
def test_minimum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
####################################################################################
def test_mod():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.mod(cArray1, cArray2).getNumpyArray(), np.mod(data1, data2))
####################################################################################
def test_multiply():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
####################################################################################
def test_nan_to_num():
shapeInput = np.random.randint(50, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.size(), ]).astype(np.double)
nan_idx = np.random.choice(range(data.size), 10, replace=False)
pos_inf_idx = np.random.choice(range(data.size), 10, replace=False)
neg_inf_idx = np.random.choice(range(data.size), 10, replace=False)
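    # the three index sets are drawn independently and may overlap; later assignments take precedence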
data[nan_idx] = np.nan
data[pos_inf_idx] = np.inf
data[neg_inf_idx] = -np.inf
data = data.reshape(shapeInput)
cArray.setArray(data)
nan_replace = float(np.random.randint(100))
pos_inf_replace = float(np.random.randint(100))
neg_inf_replace = float(np.random.randint(100))
assert np.array_equal(NumCpp.nan_to_num(cArray, nan_replace, pos_inf_replace, neg_inf_replace),
np.nan_to_num(data, nan=nan_replace, posinf=pos_inf_replace, neginf=neg_inf_replace))
####################################################################################
def test_nanargmax():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
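    # flatten, scatter NaNs at roughly 10% of positions, then restore the 2D shape (pattern reused below)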
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmax(cArray, NumCpp.Axis.NONE).item() == np.nanargmax(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmax(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmax(data, axis=1))
####################################################################################
def test_nanargmin():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmin(cArray, NumCpp.Axis.NONE).item() == np.nanargmin(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmin(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmin(data, axis=1))
####################################################################################
def test_nancumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumprod(data, axis=None))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumprod(data, axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumprod(data, axis=1))
####################################################################################
def test_nancumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumsum(data, axis=None))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumsum(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumsum(data, axis=1))
####################################################################################
def test_nanmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmax(cArray, NumCpp.Axis.NONE).item() == np.nanmax(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmax(data, axis=1))
####################################################################################
def test_nanmean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmean(cArray, NumCpp.Axis.NONE).item() == np.nanmean(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmean(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmean(data, axis=1))
####################################################################################
def test_nanmedian():
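    # as in test_median, require an odd total element count so the median is a single element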
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert (NumCpp.nanmedian(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() ==
np.nanmedian(data, axis=None).item())
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[0].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
# np.nanmedian(data, axis=0))
#
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[1].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
# np.nanmedian(data, axis=1))
####################################################################################
def test_nanmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmin(cArray, NumCpp.Axis.NONE).item() == np.nanmin(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmin(data, axis=1))
####################################################################################
def test_nanpercentile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_nanprod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanprod(cArray, NumCpp.Axis.NONE).item() == np.nanprod(data, axis=None)
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nanprod(data, axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nanprod(data, axis=1))
####################################################################################
def test_nans():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.nansSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.nansRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.nansShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
####################################################################################
def test_nans_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.nans_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(np.isnan(cArray2.getNumpyArray())))
####################################################################################
def test_nanstd():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.nanstd(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=1), 9))
####################################################################################
def test_nansum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nansum(cArray, NumCpp.Axis.NONE).item() == np.nansum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nansum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nansum(data, axis=1))
####################################################################################
def test_nanvar():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanvar(cArray, NumCpp.Axis.NONE).item(), 8) == np.round(np.nanvar(data), 8)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=0), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=1), 8))
####################################################################################
def test_nbytes():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
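    # a double is 8 bytes, so nbytes should equal size * sizeof(double)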
assert NumCpp.nbytes(cArray) == data.size * 8
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
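    # a complex double is 16 bytes (two 8-byte components)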
assert NumCpp.nbytes(cArray) == data.size * 16
####################################################################################
def test_negative():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
####################################################################################
def test_newbyteorderArray():
value = np.random.randint(1, 100, [1, ]).item()
assert (NumCpp.newbyteorderScaler(value, NumCpp.Endian.BIG) ==
np.asarray([value], dtype=np.uint32).newbyteorder().item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.newbyteorderArray(cArray, NumCpp.Endian.BIG),
data.newbyteorder())
####################################################################################
def test_none():
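    # none() is expected to mirror np.logical_not(np.any(...)) for each axis case below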
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
####################################################################################
def test_nonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
####################################################################################
def test_norm():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() == np.linalg.norm(data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
allPass = True
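    # Axis.ROW reduces along numpy axis 0, so each norm pairs with a column of data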
for idx, row in enumerate(data.transpose()):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.COL).getNumpyArray().flatten()
allPass = True
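    # Axis.COL reduces along numpy axis 1, so each norm pairs with a row of data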
for idx, row in enumerate(data):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
assert norms is not None
####################################################################################
def test_not_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
####################################################################################
def test_ones():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == 1))
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquareComplex(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowColComplex(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShapeComplex(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
####################################################################################
def test_ones_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_likeComplex(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == complex(1, 0)))
####################################################################################
def test_outer():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
####################################################################################
def test_pad():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item()
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray = NumCpp.NdArray(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray = NumCpp.NdArrayComplexDouble(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
####################################################################################
def test_partition():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
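
# A hedged helper sketch (hypothetical, not used by the suite) capturing the
# invariant the checks above verify: every element left of kth is <= the kth
# element, and every element right of it is >= the kth element.
def isPartitioned(arr, kth):
    return np.all(arr[:kth] <= arr[kth]) and np.all(arr[kth:] >= arr[kth])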
####################################################################################
def test_percentile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.percentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.percentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.percentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.percentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.percentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_polar():
components = np.random.rand(2).astype(np.double)
assert NumCpp.polarScaler(components[0], components[1])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
magArray = NumCpp.NdArray(shape)
angleArray = NumCpp.NdArray(shape)
mag = np.random.rand(shape.rows, shape.cols)
angle = np.random.rand(shape.rows, shape.cols)
magArray.setArray(mag)
angleArray.setArray(angle)
assert NumCpp.polarArray(magArray, angleArray) is not None
####################################################################################
def test_power():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_powerf():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
exponents = np.random.rand(shape.rows, shape.cols) * 3
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.rand(shape.rows, shape.cols) * 3 + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_prod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
####################################################################################
def test_proj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert NumCpp.projScaler(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cData = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cData.setArray(data)
assert NumCpp.projArray(cData) is not None
####################################################################################
def test_ptp():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.COL).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=1))
####################################################################################
def test_put():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
    numIndices = np.random.randint(1, shape.size())
indices = np.asarray(range(numIndices), np.uint32)
value = np.random.randint(1, 500)
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cIndices.setArray(indices)
NumCpp.put(cArray, cIndices, value)
data.put(indices, value)
assert np.array_equal(cArray.getNumpyArray(), data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
    numIndices = np.random.randint(1, shape.size())
indices = np.asarray(range(numIndices), dtype=np.uint32)
values = np.random.randint(1, 500, [numIndices, ])
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cValues = NumCpp.NdArray(1, numIndices)
cIndices.setArray(indices)
cValues.setArray(values)
NumCpp.put(cArray, cIndices, cValues)
data.put(indices, values)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_rad2deg():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.rad2degScaler(value), 9) == np.round(np.rad2deg(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rad2degArray(cArray), 9), np.round(np.rad2deg(data), 9))
####################################################################################
def test_radians():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.radiansScaler(value), 9) == np.round(np.radians(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.radiansArray(cArray), 9), np.round(np.radians(data), 9))
####################################################################################
def test_ravel():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
cArray2 = NumCpp.ravel(cArray)
assert np.array_equal(cArray2.getNumpyArray().flatten(), np.ravel(data))
####################################################################################
def test_real():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.realScaler(value), 9) == np.round(np.real(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.realArray(cArray), 9), np.round(np.real(data), 9))
####################################################################################
def test_reciprocal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
imag = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
####################################################################################
def test_remainder():
# numpy and cmath remainders are calculated differently, so convert for testing purposes
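    # For example (illustration only): np.remainder(10.5, 3) returns 1.5 (the
    # sign follows the divisor), while the IEEE-754 remainder used by cmath,
    # math.remainder(10.5, 3), returns -1.5 (quotient rounded to nearest);
    # adding the divisor to a negative IEEE remainder recovers numpy's value.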
values = np.random.rand(2) * 100
values = np.sort(values)
res = NumCpp.remainderScaler(values[1].item(), values[0].item())
if res < 0:
res += values[0].item()
assert np.round(res, 9) == np.round(np.remainder(values[1], values[0]), 9)
# numpy and cmath remainders are calculated differently, so convert for testing purposes
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols) * 100 + 10
data2 = data1 - np.random.rand(shape.rows, shape.cols) * 10
cArray1.setArray(data1)
cArray2.setArray(data2)
res = NumCpp.remainderArray(cArray1, cArray2)
res[res < 0] = res[res < 0] + data2[res < 0]
assert np.array_equal(np.round(res, 9), np.round(np.remainder(data1, data2), 9))
####################################################################################
def test_replace():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
####################################################################################
def test_reshape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = data.size
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(1, newShape))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshapeList(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumCols = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, -1, newNumCols)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(-1, newNumCols))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumRows = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, newNumRows, -1)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(newNumRows, -1))
####################################################################################
def test_resize():
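# resizeFast presumably discards element values while resizeSlow preserves them;
# only the resulting shape is asserted for both overloads here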
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeFast(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeSlow(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
####################################################################################
def test_right_shift():
shapeInput = np.random.randint(20, 100, [2, ])
bitsToShift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.right_shift(cArray, bitsToShift).getNumpyArray(),
np.right_shift(data, bitsToShift))
####################################################################################
def test_rint():
value = np.random.rand(1).item() * 2 * np.pi
assert NumCpp.rintScaler(value) == np.rint(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.rintArray(cArray), np.rint(data))
####################################################################################
def test_rms():
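# rms = sqrt(mean(x**2)); numpy has no rms function, so the formula is spelled
# out explicitly on the numpy side of each assert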
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
####################################################################################
def test_roll():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, data.size, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.NONE).getNumpyArray(),
np.roll(data, amount, axis=None))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.cols, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.ROW).getNumpyArray(),
np.roll(data, amount, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.rows, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.COL).getNumpyArray(),
np.roll(data, amount, axis=1))
####################################################################################
def test_rot90():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(1, 4, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.rot90(cArray, amount).getNumpyArray(), np.rot90(data, amount))
####################################################################################
def test_round():
value = np.random.rand(1).item() * 2 * np.pi
assert NumCpp.roundScaler(value, 10) == np.round(value, 10)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.roundArray(cArray, 9), np.round(data, 9))
####################################################################################
def test_row_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.row_stack(cArray1, cArray2, cArray3, cArray4),
np.row_stack([data1, data2, data3, data4]))
####################################################################################
def test_setdiff1d():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
####################################################################################
def test_shape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.shape().rows == shape.rows and cArray.shape().cols == shape.cols
####################################################################################
def test_sign():
value = np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
value = np.random.randn(1).item() * 100 + 1j * np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
####################################################################################
def test_signbit():
value = np.random.randn(1).item() * 100
assert NumCpp.signbitScaler(value) == np.signbit(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signbitArray(cArray), np.signbit(data))
####################################################################################
def test_sin():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
####################################################################################
def test_sinc():
value = np.random.randn(1).item()
assert np.round(NumCpp.sincScaler(value), 9) == np.round(np.sinc(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sincArray(cArray), 9), np.round(np.sinc(data), 9))
####################################################################################
def test_sinh():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
value = np.random.randn(1).item() + 1j * np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randn(shape.rows, shape.cols) + 1j * np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
####################################################################################
def test_size():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.size() == shapeInput.prod().item()
####################################################################################
def test_sort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
####################################################################################
def test_sqrt():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
####################################################################################
def test_square():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
####################################################################################
def test_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.vstack([data1, data2, data3, data4]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_stdev():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.stdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.std(data), 9)
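# numpy and NumCpp appear to define the standard deviation of complex data
# differently, so the complex overloads below are only sanity-checked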
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.ROW) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.COL) is not None
####################################################################################
def test_subtract():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
####################################################################################
def test_sum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.sum(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.sum(data, axis=1))
####################################################################################
def test_swap():
shapeInput1 = np.random.randint(20, 100, [2, ])
shapeInput2 = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols]).astype(np.double)
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
NumCpp.swap(cArray1, cArray2)
assert (np.array_equal(cArray1.getNumpyArray(), data2) and
np.array_equal(cArray2.getNumpyArray(), data1))
####################################################################################
def test_swapaxes():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.swapaxes(cArray).getNumpyArray(), data.T)
####################################################################################
def test_tan():
value = np.random.rand(1).item() * np.pi
assert np.round(NumCpp.tanScaler(value), 9) == np.round(np.tan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.tanScaler(value), 9) == np.round(np.tan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanArray(cArray), 9), np.round(np.tan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanArray(cArray), 9), np.round(np.tan(data), 9))
####################################################################################
def test_tanh():
value = np.random.rand(1).item() * np.pi
assert np.round(NumCpp.tanhScaler(value), 9) == np.round(np.tanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.tanhScaler(value), 9) == np.round(np.tanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanhArray(cArray), 9), np.round(np.tanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanhArray(cArray), 9), np.round(np.tanh(data), 9))
####################################################################################
def test_tile():
shapeInput = np.random.randint(1, 10, [2, ])
shapeRepeat = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shapeR = NumCpp.Shape(shapeRepeat[0].item(), shapeRepeat[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.tileRectangle(cArray, shapeR.rows, shapeR.cols), np.tile(data, shapeRepeat))
shapeInput = np.random.randint(1, 10, [2, ])
shapeRepeat = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shapeR = NumCpp.Shape(shapeRepeat[0].item(), shapeRepeat[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.tileShape(cArray, shapeR), np.tile(data, shapeRepeat))
####################################################################################
def test_tofile():
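# round-trip through a binary file (empty separator) and a text file ('\n' separator)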
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, 'temp.bin')
NumCpp.tofile(cArray, filename, '')
assert os.path.exists(filename)
data2 = np.fromfile(filename, np.double).reshape(shapeInput)
assert np.array_equal(data, data2)
os.remove(filename)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, 'temp.txt')
NumCpp.tofile(cArray, filename, '\n')
assert os.path.exists(filename)
data2 = np.fromfile(filename, dtype=np.double, sep='\n').reshape(shapeInput)
assert np.array_equal(data, data2)
os.remove(filename)
####################################################################################
def test_toStlVector():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
out = np.asarray(NumCpp.toStlVector(cArray))
assert np.array_equal(out, data.flatten())
####################################################################################
def test_trace():
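# NumCpp's Axis.ROW/Axis.COL map onto swapped axis1/axis2 arguments of ndarray.trace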
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows, [1, ]).item()
assert np.array_equal(NumCpp.trace(cArray, offset, NumCpp.Axis.ROW), data.trace(offset, axis1=1, axis2=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows, [1, ]).item()
assert np.array_equal(NumCpp.trace(cArray, offset, NumCpp.Axis.COL), data.trace(offset, axis1=0, axis2=1))
####################################################################################
def test_transpose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.transpose(cArray).getNumpyArray(), np.transpose(data))
####################################################################################
def test_trapz():
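# trapezoidal integration of sampled quadratics, with uniform spacing (trapzDx)
# and with explicit x samples (trapz), over the full array and along each axis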
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), 1)
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())])
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.NONE).item()
integralPy = np.trapz(data, dx=dx)
assert np.round(integralC, 8) == np.round(integralPy, 8)
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x - coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.ROW).flatten()
integralPy = np.trapz(data, dx=dx, axis=0)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x - coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.COL).flatten()
integralPy = np.trapz(data, dx=dx, axis=1)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(1, np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())])
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.NONE).item()
integralPy = np.trapz(data, x=dx)
assert np.round(integralC, 8) == np.round(integralPy, 8)
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.ROW).flatten()
integralPy = np.trapz(data, x=dx, axis=0)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.COL).flatten()
integralPy = np.trapz(data, x=dx, axis=1)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
####################################################################################
def test_tril():
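# lower-triangular builders are compared against np.tri; the array overloads
# against np.tril with the same offset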
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilSquare(squareSize, offset),
np.tri(squareSize, k=offset))
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilSquareComplex(squareSize, offset),
np.tri(squareSize, k=offset) + 1j * np.zeros([squareSize, squareSize]))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput) // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilRect(shapeInput[0].item(), shapeInput[1].item(), offset),
np.tri(shapeInput[0].item(), shapeInput[1].item(), k=offset))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput) // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilRectComplex(shapeInput[0].item(), shapeInput[1].item(), offset),
np.tri(shapeInput[0].item(), shapeInput[1].item(), k=offset) + 1j * np.zeros(shapeInput))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilArray(cArray, offset),
np.tril(data, k=offset))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilArray(cArray, offset),
np.tril(data, k=offset))
####################################################################################
def test_triu():
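# an upper-triangular ones matrix with diagonal offset k equals np.tri(n, k=-k).T,
# which is what the square builders are compared against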
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuSquare(squareSize, offset),
np.tri(squareSize, k=-offset).T)
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuSquareComplex(squareSize, offset),
np.tri(squareSize, k=-offset).T + 1j * np.zeros([squareSize, squareSize]))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput), [1, ]).item()
# NOTE: numpy triu appears to have a bug... just check that NumCpp runs without error
assert NumCpp.triuRect(shapeInput[0].item(), shapeInput[1].item(), offset) is not None
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput), [1, ]).item()
# NOTE: numpy triu appears to have a bug... just check that NumCpp runs without error
assert NumCpp.triuRectComplex(shapeInput[0].item(), shapeInput[1].item(), offset) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuArray(cArray, offset), np.triu(data, k=offset))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuArray(cArray, offset), np.triu(data, k=offset))
####################################################################################
def test_trim_zeros():
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
data[0, :offsetBeg] = 0
data[0, -offsetEnd:] = 0
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'f').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'f'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[0, :offsetBeg] = complex(0, 0)
data[0, -offsetEnd:] = complex(0, 0)
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'f').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'f'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
data[0, :offsetBeg] = 0
data[0, -offsetEnd:] = 0
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'b').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'b'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[0, :offsetBeg] = complex(0, 0)
data[0, -offsetEnd:] = complex(0, 0)
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'b').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'b'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
data[0, :offsetBeg] = 0
data[0, -offsetEnd:] = 0
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'fb').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'fb'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[0, :offsetBeg] = complex(0, 0)
data[0, -offsetEnd:] = complex(0, 0)
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'fb').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'fb'))
####################################################################################
def test_trunc():
value = np.random.rand(1).item() * np.pi
assert NumCpp.truncScaler(value) == np.trunc(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(NumCpp.truncArray(cArray), np.trunc(data))
####################################################################################
def test_union1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.union1d(cArray1, cArray2).getNumpyArray().flatten(), np.union1d(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.union1d(cArray1, cArray2).getNumpyArray().flatten(), np.union1d(data1, data2))
####################################################################################
def test_unique():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.unique(cArray).getNumpyArray().flatten(), np.unique(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.unique(cArray).getNumpyArray().flatten(), np.unique(data))
####################################################################################
def test_unwrap():
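# unwrapping an angle into (-pi, pi] is equivalent to atan2(sin(x), cos(x))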
value = np.random.randn(1).item() * 3 * np.pi
assert np.round(NumCpp.unwrapScaler(value), 9) == np.round(np.arctan2(np.sin(value), np.cos(value)), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.unwrapArray(cArray), 9), np.round(np.arctan2(np.sin(data), np.cos(data)), 9))
####################################################################################
def test_var():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.var(cArray, NumCpp.Axis.NONE).item(), 8) == np.round(np.var(data), 8)
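# as with stdev, complex variance conventions differ between numpy and NumCpp,
# so the complex overloads below are only sanity-checked for a non-None result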
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.var(cArray, NumCpp.Axis.NONE) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.var(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 8),
np.round(np.var(data, axis=0), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.var(cArray, NumCpp.Axis.ROW) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.var(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 8),
np.round(np.var(data, axis=1), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.var(cArray, NumCpp.Axis.COL) is not None
####################################################################################
def test_vstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.vstack(cArray1, cArray2, cArray3, cArray4),
np.vstack([data1, data2, data3, data4]))
####################################################################################
def test_where():
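# exercises every overload: mask with array/array, array/scalar, scalar/array,
# and scalar/scalar, for both real and complex dtypes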
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
cArrayA = NumCpp.NdArray(shape)
cArrayB = NumCpp.NdArray(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
dataA = np.random.randint(1, 100, [shape.rows, shape.cols])
dataB = np.random.randint(1, 100, [shape.rows, shape.cols])
cArrayMask.setArray(dataMask)
cArrayA.setArray(dataA)
cArrayB.setArray(dataB)
assert np.array_equal(NumCpp.where(cArrayMask, cArrayA, cArrayB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
cArrayA = NumCpp.NdArrayComplexDouble(shape)
realA = np.random.randint(1, 100, [shape.rows, shape.cols])
imagA = np.random.randint(1, 100, [shape.rows, shape.cols])
dataA = realA + 1j * imagA
cArrayA.setArray(dataA)
cArrayB = NumCpp.NdArrayComplexDouble(shape)
realB = np.random.randint(1, 100, [shape.rows, shape.cols])
imagB = np.random.randint(1, 100, [shape.rows, shape.cols])
dataB = realB + 1j * imagB
cArrayB.setArray(dataB)
assert np.array_equal(NumCpp.where(cArrayMask, cArrayA, cArrayB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
cArrayA = NumCpp.NdArray(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
dataA = np.random.randint(1, 100, [shape.rows, shape.cols])
dataB = np.random.randint(1, 100)
cArrayMask.setArray(dataMask)
cArrayA.setArray(dataA)
assert np.array_equal(NumCpp.where(cArrayMask, cArrayA, dataB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
cArrayA = NumCpp.NdArrayComplexDouble(shape)
realA = np.random.randint(1, 100, [shape.rows, shape.cols])
imagA = np.random.randint(1, 100, [shape.rows, shape.cols])
dataA = realA + 1j * imagA
cArrayA.setArray(dataA)
realB = np.random.randint(1, 100)
imagB = np.random.randint(1, 100)
dataB = realB + 1j * imagB
assert np.array_equal(NumCpp.where(cArrayMask, cArrayA, dataB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
cArrayB = NumCpp.NdArray(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
dataB = np.random.randint(1, 100, [shape.rows, shape.cols])
dataA = np.random.randint(1, 100)
cArrayMask.setArray(dataMask)
cArrayB.setArray(dataB)
assert np.array_equal(NumCpp.where(cArrayMask, dataA, cArrayB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
cArrayB = NumCpp.NdArrayComplexDouble(shape)
realB = np.random.randint(1, 100, [shape.rows, shape.cols])
imagB = np.random.randint(1, 100, [shape.rows, shape.cols])
dataB = realB + 1j * imagB
cArrayB.setArray(dataB)
realA = np.random.randint(1, 100)
imagA = np.random.randint(1, 100)
dataA = realA + 1j * imagA
assert np.array_equal(NumCpp.where(cArrayMask, dataA, cArrayB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
dataB = np.random.randint(1, 100)
dataA = np.random.randint(1, 100)
assert np.array_equal(NumCpp.where(cArrayMask, dataA, dataB), np.where(dataMask, dataA, dataB))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayMask = NumCpp.NdArrayBool(shape)
dataMask = np.random.randint(0, 2, [shape.rows, shape.cols], dtype=bool)
cArrayMask.setArray(dataMask)
realB = np.random.randint(1, 100)
imagB = np.random.randint(1, 100)
dataB = realB + 1j * imagB
realA = np.random.randint(1, 100)
imagA = np.random.randint(1, 100)
dataA = realA + 1j * imagA
assert np.array_equal(NumCpp.where(cArrayMask, dataA, dataB), np.where(dataMask, dataA, dataB))
####################################################################################
def test_zeros():
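    # Cover each zeros factory: square, row/col, shape-object and zeros_like,
    # in both real and complex flavors.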
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.zerosSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == 0))
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.zerosSquareComplex(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == complex(0, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.zerosRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == 0))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.zerosRowColComplex(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == complex(0, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.zerosShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == 0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.zerosShapeComplex(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == complex(0, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.zeros_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == 0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.zeros_likeComplex(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == complex(0, 0)))
``` |
{
"source": "Jouad01/Proyecto_Pydevops",
"score": 3
} |
#### File: src/acceso_a_datos/agregador_base_de_datos.py
```python
import pymongo
import json
def agregador_base_de_datos(nueva_nave):
try:
myclient = pymongo.MongoClient("mongodb+srv://devops:12345@pro<EMAIL>.gk<EMAIL>.mongodb.net/ProyectoPydevops?retryWrites=true&w=majority")
naves = myclient.ProyectoPydevops.datos_naves
with open(nueva_nave) as json_file:
nave = json.load(json_file)
            for element in nave:
                if nave[element].isnumeric():
                    nave[element] = int(nave[element])
                if element.strip() == "caracteristicas":
                    nave[element] = nave[element].split(", ")
            naves.insert_one(nave)
            print("The ship was inserted into the database successfully")
            return True
    except Exception:
        return False
if __name__ == "__main__":
agregador_base_de_datos("naves.json")
```
#### File: test/acceso_a_datos/test_actualizar_documento.py
```python
import pytest
import sys
sys.path.append(".")
from domain.src.acceso_a_datos.actualizar_documento import actualizar_documento
mongo_db_url = "mongodb+srv://devops:12345@<EMAIL>opy<EMAIL>/?retryWrites=true&w=majority"
@pytest.mark.actualizar_base_de_datos
def test_actualizar_base_de_datos():
assert actualizar_documento(mongo_db_url, {"modelo":"Destructor Estelar clase Venator"}, {"tasa":1500})
```
#### File: test/logica/test_actualizar_hugo.py
```python
import pytest
import sys
sys.path.append(".")
from domain.src.actualizar_hugo import proyecto_pydevops
#Not sure how to test code that calls exit()
#@pytest.mark.actualizar_hugo
#def actualizar_hugo():
# assert not proyecto_pydevops("croquetas")
``` |
{
"source": "JouAdr/Test",
"score": 2
} |
#### File: internetconsommation/consommationapp/views.py
```python
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import Http404
from .models import InternetConsumption
def home(request):
consommations = InternetConsumption.objects.all()
return render(request, 'home.html', {'consommations':consommations})
def user_details(request, pk):
try:
user = User.objects.get(pk=pk)
consommations = InternetConsumption.objects.all()
consommations_total = list()
for consommation in consommations:
consommations_total.append([consommation.user.username, (consommation.upload + consommation.download)])
except User.DoesNotExist:
raise Http404
return render(request, 'user_details.html', {'consommations':consommations, 'user':user, 'consommations_total':consommations_total})
``` |
{
"source": "joubin/gitlabber",
"score": 2
} |
#### File: gitlabber/gitlabber/cli.py
```python
import os
import sys
import logging
import logging.handlers
import enum
from argparse import ArgumentParser, RawTextHelpFormatter, FileType, SUPPRESS
from .gitlab_tree import GitlabTree
from .format import PrintFormat
from .method import CloneMethod
from . import __version__ as VERSION
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
def main():
args = parse_args(argv=None if sys.argv[1:] else ['--help'])
if args.version:
print(VERSION)
sys.exit(0)
if args.token is None:
print('Please specify a valid token with the -t flag or the \'GITLAB_TOKEN\' environment variable')
sys.exit(1)
if args.url is None:
print('Please specify a valid gitlab base url with the -h flag or the \'GITLAB_URL\' environment variable')
sys.exit(1)
config_logging(args)
includes=split(args.include)
excludes=split(args.exclude)
tree = GitlabTree(args.url, args.token, args.method, includes,
excludes, args.file, args.concurrency, args.verbose)
log.debug("Reading projects tree from gitlab at [%s]", args.url)
tree.load_tree()
if tree.is_empty():
log.fatal("The tree is empty, check your include/exclude patterns or run with more verbosity for debugging")
sys.exit(1)
if args.print:
tree.print_tree(args.print_format)
else:
tree.sync_tree(args.dest)
def split(arg):
return arg.split(",") if arg != "" else None
def config_logging(args):
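    # With --verbose all logging goes to stdout; the level drops to ERROR when
    # --print is also set so the tree output is not interleaved with log lines.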
if args.verbose:
handler = logging.StreamHandler(sys.stdout)
logging.root.handlers = []
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logging.root.addHandler(handler)
level = logging.ERROR if args.print else logging.DEBUG
logging.root.setLevel(level)
log.debug("verbose=[%s], print=[%s], log level set to [%s] level", args.verbose, args.print, level)
os.environ["GIT_PYTHON_TRACE"] = 'full'
def parse_args(argv=None):
example_text = '''examples:
clone an entire gitlab tree using a url and a token:
gitlabber -t <personal access token> -u <gitlab url>
only print the gitlab tree:
gitlabber -p .
clone only projects under subgroup 'MySubGroup' to location '~/GitlabRoot':
gitlabber -i '/MyGroup/MySubGroup**' ~/GitlabRoot
clone only projects under group 'MyGroup' excluding any projects under subgroup 'MySubGroup':
gitlabber -i '/MyGroup**' -x '/MyGroup/MySubGroup**' .
clone an entire gitlab tree except projects under groups named 'ArchiveGroup':
gitlabber -x '/ArchiveGroup**' .
clone projects that start with a case insensitive 'w' using a regular expression:
gitlabber -i '/{[w].*}' .
'''
parser = ArgumentParser(
description='Gitlabber - clones or pulls entire groups/projects tree from gitlab',
prog="gitlabber",
epilog=example_text,
formatter_class=RawTextHelpFormatter)
parser.add_argument(
'dest',
nargs='?',
type=validate_path,
help='destination path for the cloned tree (created if doesn\'t exist)')
parser.add_argument(
'-t',
'--token',
metavar=('token'),
default=os.environ.get('GITLAB_TOKEN'),
help='gitlab personal access token https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html')
parser.add_argument(
'-u',
'--url',
metavar=('url'),
default=os.environ.get('GITLAB_URL'),
help='base gitlab url (e.g.: \'http://gitlab.mycompany.com\')')
parser.add_argument(
'--verbose',
action='store_true',
help='print more verbose output')
parser.add_argument(
'-f',
'--file',
metavar=('file'),
help=SUPPRESS)
parser.add_argument(
'-c',
'--concurrency',
default=os.environ.get('GITLABBER_GIT_CONCURRENCY', 1),
type=int,
metavar=('concurrency'),
help=SUPPRESS)
parser.add_argument(
'-p',
'--print',
action='store_true',
help='print the tree without cloning')
parser.add_argument(
'--print-format',
type=PrintFormat.argparse,
default=PrintFormat.TREE,
choices=list(PrintFormat),
help='print format (default: \'tree\')')
parser.add_argument(
'-m',
'--method',
type=CloneMethod.argparse,
choices=list(CloneMethod),
default=os.environ.get('GITLABBER_CLONE_METHOD', "ssh"),
help='the method to use for cloning (either "ssh" or "http")')
parser.add_argument(
'-i',
'--include',
metavar=('csv'),
default=os.environ.get('GITLABBER_INCLUDE', ""),
help='comma delimited list of glob patterns of paths to projects or groups to clone/pull')
parser.add_argument(
'-x',
'--exclude',
metavar=('csv'),
default=os.environ.get('GITLABBER_EXCLUDE', ""),
help='comma delimited list of glob patterns of paths to projects or groups to exclude from clone/pull')
parser.add_argument(
'--version',
action='store_true',
help='print the version')
args = parser.parse_args(argv)
args_print = vars(args).copy()
args_print['token'] = 'xxxxx'
log.debug("running with args [%s]", args_print)
return args
def validate_path(value):
if value.endswith('/'):
return value[:-1]
return value
```
#### File: gitlabber/gitlabber/gitlab_tree.py
```python
from gitlab import Gitlab
from anytree import Node, RenderTree
from anytree.exporter import DictExporter, JsonExporter
from anytree.importer import DictImporter
from .git import sync_tree
from .format import PrintFormat
from .method import CloneMethod
from .progress import ProgressBar
import yaml
import globre
import logging
import os
log = logging.getLogger(__name__)
class GitlabTree:
    def __init__(self, url, token, method, includes=None, excludes=None, in_file=None, concurrency=1, disable_progress=False):
self.includes = includes
self.excludes = excludes
self.url = url
self.root = Node("", root_path="", url=url)
self.gitlab = Gitlab(url, private_token=token, ssl_verify=GitlabTree.get_ca_path())
self.method = method
self.in_file = in_file
self.concurrency = concurrency
self.disable_progress = disable_progress
self.progress = ProgressBar('* loading tree', disable_progress)
@staticmethod
def get_ca_path():
"""
returns REQUESTS_CA_BUNDLE, CURL_CA_BUNDLE, or True
"""
return next(item for item in [os.getenv('REQUESTS_CA_BUNDLE', None), os.getenv('CURL_CA_BUNDLE', None), True]
if item is not None)
    def is_included(self, node):
        '''
        returns True if the node should be included.
        if there are no include patterns then everything is included
        any include pattern matching the root path will result in inclusion
        '''
        if self.includes is not None:
            for include in self.includes:
                if globre.match(include, node.root_path):
                    log.debug(
                        "Matched include path [%s] to node [%s]", include, node.root_path)
                    return True
            return False
        else:
            return True
def is_excluded(self, node):
'''
returns True if the node should be excluded
        if there are no exclude patterns then nothing is excluded
any exclude pattern matching the root path will result in exclusion
'''
if self.excludes is not None:
for exclude in self.excludes:
if globre.match(exclude, node.root_path):
log.debug(
"Matched exclude path [%s] to node [%s]", exclude, node.root_path)
return True
return False
def filter_tree(self, parent):
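        # Walk depth-first: filter each subgroup's subtree before checking the
        # subgroup itself; detaching a child (parent = None) drops its whole
        # subtree from the final tree.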
for child in parent.children:
if not child.is_leaf:
self.filter_tree(child)
if child.is_leaf:
if not self.is_included(child):
child.parent = None
if self.is_excluded(child):
child.parent = None
else:
if not self.is_included(child):
child.parent = None
if self.is_excluded(child):
child.parent = None
def root_path(self, node):
return "/".join([str(n.name) for n in node.path])
def make_node(self, name, parent, url):
node = Node(name=name, parent=parent, url=url)
node.root_path = self.root_path(node)
return node
def add_projects(self, parent, projects):
for project in projects:
project_url = project.ssh_url_to_repo if self.method is CloneMethod.SSH else project.http_url_to_repo
node = self.make_node(project.name, parent,
url=project_url)
self.progress.show_progress(node.name, 'project')
def get_projects(self, group, parent):
projects = group.projects.list(as_list=False)
self.progress.update_progress_length(len(projects))
self.add_projects(parent, projects)
def get_subgroups(self, group, parent):
subgroups = group.subgroups.list(as_list=False)
self.progress.update_progress_length(len(subgroups))
for subgroup_def in subgroups:
subgroup = self.gitlab.groups.get(subgroup_def.id)
node = self.make_node(subgroup.name, parent, url=subgroup.web_url)
self.progress.show_progress(node.name, 'group')
self.get_subgroups(subgroup, node)
self.get_projects(subgroup, node)
def load_gitlab_tree(self):
groups = self.gitlab.groups.list(as_list=False)
self.progress.init_progress(len(groups))
for group in groups:
if group.parent_id is None:
node = self.make_node(group.name, self.root, url=group.web_url)
self.progress.show_progress(node.name, 'group')
self.get_subgroups(group, node)
self.get_projects(group, node)
elapsed = self.progress.finish_progress()
log.debug("Loading projects tree from gitlab took [%s]", elapsed)
def load_file_tree(self):
with open(self.in_file, 'r') as stream:
dct = yaml.safe_load(stream)
self.root = DictImporter().import_(dct)
def load_tree(self):
if self.in_file:
log.debug("Loading tree from file [%s]", self.in_file)
self.load_file_tree()
else:
            log.debug("Loading projects tree from gitlab server [%s]", self.url)
self.load_gitlab_tree()
log.debug("Fetched root node with [%d] projects" % len(
self.root.leaves))
self.filter_tree(self.root)
def print_tree(self, format=PrintFormat.TREE):
if format is PrintFormat.TREE:
self.print_tree_native()
elif format is PrintFormat.YAML:
self.print_tree_yaml()
elif format is PrintFormat.JSON:
self.print_tree_json()
else:
log.fatal("Invalid print format [%s]", format)
def print_tree_native(self):
for pre, _, node in RenderTree(self.root):
line = ""
if node.is_root:
line = "%s%s [%s]" % (pre, "root", self.url)
else:
line = "%s%s [%s]" % (pre, node.name, node.root_path)
print(line)
def print_tree_yaml(self):
dct = DictExporter().export(self.root)
print(yaml.dump(dct, default_flow_style=False))
def print_tree_json(self):
exporter = JsonExporter(indent=2, sort_keys=True)
print(exporter.export(self.root))
def sync_tree(self, dest):
log.debug("Going to clone/pull [%s] groups and [%s] projects" %
(len(self.root.descendants) - len(self.root.leaves), len(self.root.leaves)))
sync_tree(self.root, dest, concurrency=self.concurrency,
disable_progress=self.disable_progress)
def is_empty(self):
return self.root.height < 1
``` |
{
"source": "joubin/insecure-chromium",
"score": 2
} |
#### File: insecure-chromium/devutils/validate_patches.py
```python
import argparse
import ast
import base64
import collections
import email.utils
import json
import logging
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
from buildkit.common import ENCODING, get_logger, get_chromium_version
from buildkit.config import ConfigBundle
from buildkit.third_party import unidiff
from buildkit.third_party.unidiff.constants import LINE_TYPE_EMPTY, LINE_TYPE_NO_NEWLINE
from buildkit.patches import DEFAULT_PATCH_DIR
sys.path.pop(0)
try:
import requests
import requests.adapters
import urllib3.util
    class _VerboseRetry(urllib3.util.Retry):
        """A more verbose version of urllib3's Retry that logs retry activity"""
def sleep_for_retry(self, response=None):
"""Sleeps for Retry-After, and logs the sleep time"""
if response:
retry_after = self.get_retry_after(response)
if retry_after:
get_logger().info(
'Got HTTP status %s with Retry-After header. Retrying after %s seconds...',
response.status, retry_after)
else:
get_logger().info(
'Could not find Retry-After header for HTTP response %s. Status reason: %s',
response.status, response.reason)
return super().sleep_for_retry(response)
def _sleep_backoff(self):
"""Log info about backoff sleep"""
get_logger().info('Running HTTP request sleep backoff')
super()._sleep_backoff()
def _get_requests_session():
session = requests.Session()
http_adapter = requests.adapters.HTTPAdapter(
max_retries=_VerboseRetry(
total=10,
read=10,
connect=10,
backoff_factor=8,
status_forcelist=urllib3.Retry.RETRY_AFTER_STATUS_CODES,
raise_on_status=False))
session.mount('http://', http_adapter)
session.mount('https://', http_adapter)
return session
except ImportError:
def _get_requests_session():
        raise RuntimeError('The Python module "requests" is required for remote '
                           'file downloading. It can be installed from PyPI.')
_CONFIG_BUNDLES_PATH = Path(__file__).parent.parent / 'config_bundles'
_PATCHES_PATH = Path(__file__).parent.parent / 'patches'
_SRC_PATH = Path('src')
class _PatchValidationError(Exception):
"""Raised when patch validation fails"""
class _UnexpectedSyntaxError(RuntimeError):
"""Raised when unexpected syntax is used in DEPS"""
class _NotInRepoError(RuntimeError):
"""Raised when the remote file is not present in the given repo"""
class _DepsNodeVisitor(ast.NodeVisitor):
_valid_syntax_types = (ast.mod, ast.expr_context, ast.boolop, ast.Assign, ast.Add, ast.Name,
ast.Dict, ast.Str, ast.NameConstant, ast.List, ast.BinOp)
_allowed_callables = ('Var', )
def visit_Call(self, node): #pylint: disable=invalid-name
"""Override Call syntax handling"""
if node.func.id not in self._allowed_callables:
raise _UnexpectedSyntaxError('Unexpected call of "%s" at line %s, column %s' %
(node.func.id, node.lineno, node.col_offset))
def generic_visit(self, node):
for ast_type in self._valid_syntax_types:
if isinstance(node, ast_type):
super().generic_visit(node)
return
raise _UnexpectedSyntaxError('Unexpected {} at line {}, column {}'.format(
type(node).__name__, node.lineno, node.col_offset))
def _validate_deps(deps_text):
"""Returns True if the DEPS file passes validation; False otherwise"""
try:
_DepsNodeVisitor().visit(ast.parse(deps_text))
except _UnexpectedSyntaxError as exc:
get_logger().error('%s', exc)
return False
return True
def _deps_var(deps_globals):
"""Return a function that implements DEPS's Var() function"""
def _var_impl(var_name):
"""Implementation of Var() in DEPS"""
return deps_globals['vars'][var_name]
return _var_impl
def _parse_deps(deps_text):
"""Returns a dict of parsed DEPS data"""
deps_globals = {'__builtins__': None}
deps_globals['Var'] = _deps_var(deps_globals)
exec(deps_text, deps_globals) #pylint: disable=exec-used
return deps_globals
def _download_googlesource_file(download_session, repo_url, version, relative_path):
"""
Returns the contents of the text file with path within the given
googlesource.com repo as a string.
"""
if 'googlesource.com' not in repo_url:
raise ValueError('Repository URL is not a googlesource.com URL: {}'.format(repo_url))
full_url = repo_url + '/+/{}/{}?format=TEXT'.format(version, str(relative_path))
get_logger().debug('Downloading: %s', full_url)
response = download_session.get(full_url)
if response.status_code == 404:
raise _NotInRepoError()
response.raise_for_status()
# Assume all files that need patching are compatible with UTF-8
return base64.b64decode(response.text, validate=True).decode('UTF-8')
def _get_dep_value_url(deps_globals, dep_value):
"""Helper for _process_deps_entries"""
if isinstance(dep_value, str):
url = dep_value
elif isinstance(dep_value, dict):
if 'url' not in dep_value:
# Ignore other types like CIPD since
# it probably isn't necessary
return None
url = dep_value['url']
else:
raise NotImplementedError()
if '{' in url:
# Probably a Python format string
url = url.format(**deps_globals['vars'])
if url.count('@') != 1:
raise _PatchValidationError('Invalid number of @ symbols in URL: {}'.format(url))
return url
def _process_deps_entries(deps_globals, child_deps_tree, child_path, deps_use_relative_paths):
"""Helper for _get_child_deps_tree"""
for dep_path_str, dep_value in deps_globals.get('deps', dict()).items():
url = _get_dep_value_url(deps_globals, dep_value)
if url is None:
continue
dep_path = Path(dep_path_str)
if not deps_use_relative_paths:
try:
dep_path = Path(dep_path_str).relative_to(child_path)
except ValueError:
# Not applicable to the current DEPS tree path
continue
grandchild_deps_tree = None # Delaying creation of dict() until it's needed
for recursedeps_item in deps_globals.get('recursedeps', tuple()):
if isinstance(recursedeps_item, str):
if recursedeps_item == str(dep_path):
grandchild_deps_tree = 'DEPS'
else: # Some sort of iterable
recursedeps_item_path, recursedeps_item_depsfile = recursedeps_item
if recursedeps_item_path == str(dep_path):
grandchild_deps_tree = recursedeps_item_depsfile
if grandchild_deps_tree is None:
# This dep is not recursive; i.e. it is fully loaded
grandchild_deps_tree = dict()
child_deps_tree[dep_path] = (*url.split('@'), grandchild_deps_tree)
def _get_child_deps_tree(download_session, current_deps_tree, child_path, deps_use_relative_paths):
"""Helper for _download_source_file"""
repo_url, version, child_deps_tree = current_deps_tree[child_path]
if isinstance(child_deps_tree, str):
# Load unloaded DEPS
deps_globals = _parse_deps(
_download_googlesource_file(download_session, repo_url, version, child_deps_tree))
child_deps_tree = dict()
current_deps_tree[child_path] = (repo_url, version, child_deps_tree)
deps_use_relative_paths = deps_globals.get('use_relative_paths', False)
_process_deps_entries(deps_globals, child_deps_tree, child_path, deps_use_relative_paths)
return child_deps_tree, deps_use_relative_paths
def _get_last_chromium_modification():
"""Returns the last modification date of the chromium-browser-official tar file"""
with _get_requests_session() as session:
response = session.head(
'https://storage.googleapis.com/chromium-browser-official/chromium-{}.tar.xz'.format(
get_chromium_version()))
response.raise_for_status()
return email.utils.parsedate_to_datetime(response.headers['Last-Modified'])
def _get_gitiles_git_log_date(log_entry):
"""Helper for _get_gitiles_git_log_date"""
return email.utils.parsedate_to_datetime(log_entry['committer']['time'])
def _get_gitiles_commit_before_date(repo_url, target_branch, target_datetime):
"""Returns the hexadecimal hash of the closest commit before target_datetime"""
json_log_url = '{repo}/+log/{branch}?format=JSON'.format(repo=repo_url, branch=target_branch)
with _get_requests_session() as session:
response = session.get(json_log_url)
response.raise_for_status()
git_log = json.loads(response.text[5:]) # Trim closing delimiters for various structures
assert len(git_log) == 2 # 'log' and 'next' entries
assert 'log' in git_log
assert git_log['log']
git_log = git_log['log']
# Check boundary conditions
if _get_gitiles_git_log_date(git_log[0]) < target_datetime:
# Newest commit is older than target datetime
return git_log[0]['commit']
if _get_gitiles_git_log_date(git_log[-1]) > target_datetime:
# Oldest commit is newer than the target datetime; assume oldest is close enough.
get_logger().warning('Oldest entry in gitiles log for repo "%s" is newer than target; '
                             'continuing with oldest entry...', repo_url)
return git_log[-1]['commit']
# Do binary search
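    # The gitiles log is ordered newest-first, so commit dates decrease as the
    # index grows; find the first entry dated at or before target_datetime.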
low_index = 0
high_index = len(git_log) - 1
mid_index = high_index
while low_index != high_index:
mid_index = low_index + (high_index - low_index) // 2
if _get_gitiles_git_log_date(git_log[mid_index]) > target_datetime:
low_index = mid_index + 1
else:
high_index = mid_index
return git_log[mid_index]['commit']
class _FallbackRepoManager:
"""Retrieves fallback repos and caches data needed for determining repos"""
_GN_REPO_URL = 'https://gn.googlesource.com/gn.git'
def __init__(self):
self._cache_gn_version = None
@property
def gn_version(self):
"""
Returns the version of the GN repo for the Chromium version used by buildkit
"""
if not self._cache_gn_version:
# Because there seems to be no reference to the logic for generating the
# chromium-browser-official tar file, it's possible that it is being generated
# by an internal script that manually injects the GN repository files.
# Therefore, assume that the GN version used in the chromium-browser-official tar
# files correspond to the latest commit in the master branch of the GN repository
# at the time of the tar file's generation. We can get an approximation for the
# generation time by using the last modification date of the tar file on
# Google's file server.
self._cache_gn_version = _get_gitiles_commit_before_date(
self._GN_REPO_URL, 'master', _get_last_chromium_modification())
return self._cache_gn_version
def get_fallback(self, current_relative_path, current_node, root_deps_tree):
"""
Helper for _download_source_file
It returns a new (repo_url, version, new_relative_path) to attempt a file download with
"""
assert len(current_node) == 3
# GN special processing
try:
new_relative_path = current_relative_path.relative_to('tools/gn')
except ValueError:
pass
else:
if current_node is root_deps_tree[_SRC_PATH]:
get_logger().info('Redirecting to GN repo version %s for path: %s', self.gn_version,
current_relative_path)
return (self._GN_REPO_URL, self.gn_version, new_relative_path)
return None, None, None
def _get_target_file_deps_node(download_session, root_deps_tree, target_file):
"""
Helper for _download_source_file
Returns the corresponding repo containing target_file based on the DEPS tree
"""
# The "deps" from the current DEPS file
current_deps_tree = root_deps_tree
current_node = None
# Path relative to the current node (i.e. DEPS file)
current_relative_path = Path('src', target_file)
previous_relative_path = None
deps_use_relative_paths = False
child_path = None
while current_relative_path != previous_relative_path:
previous_relative_path = current_relative_path
for child_path in current_deps_tree:
try:
current_relative_path = previous_relative_path.relative_to(child_path)
except ValueError:
# previous_relative_path does not start with child_path
continue
current_node = current_deps_tree[child_path]
# current_node will match with current_deps_tree after the following statement
current_deps_tree, deps_use_relative_paths = _get_child_deps_tree(
download_session, current_deps_tree, child_path, deps_use_relative_paths)
break
assert not current_node is None
return current_node, current_relative_path
def _download_source_file(download_session, root_deps_tree, fallback_repo_manager, target_file):
"""
Downloads the source tree file from googlesource.com
download_session is an active requests.Session() object
deps_dir is a pathlib.Path to the directory containing a DEPS file.
"""
current_node, current_relative_path = _get_target_file_deps_node(download_session,
root_deps_tree, target_file)
# Attempt download with potential fallback logic
repo_url, version, _ = current_node
try:
# Download with DEPS-provided repo
return _download_googlesource_file(download_session, repo_url, version,
current_relative_path)
except _NotInRepoError:
pass
get_logger().debug(
'Path "%s" (relative: "%s") not found using DEPS tree; finding fallback repo...',
target_file, current_relative_path)
repo_url, version, current_relative_path = fallback_repo_manager.get_fallback(
current_relative_path, current_node, root_deps_tree)
if not repo_url:
get_logger().error('No fallback repo found for "%s" (relative: "%s")', target_file,
current_relative_path)
raise _NotInRepoError()
try:
# Download with fallback repo
return _download_googlesource_file(download_session, repo_url, version,
current_relative_path)
except _NotInRepoError:
pass
get_logger().error('File "%s" (relative: "%s") not found in fallback repo "%s", version "%s"',
target_file, current_relative_path, repo_url, version)
raise _NotInRepoError()
def _initialize_deps_tree():
"""
Initializes and returns a dependency tree for DEPS files
    The DEPS tree is a dict with the following format:
    key - pathlib.Path relative to the DEPS file's path
    value - tuple(repo_url, version, recursive dict here)
    repo_url is the URL to the dependency's repository root
    If the recursive dict is a string, then it is the path of the DEPS file to
    load when needed
"""
root_deps_tree = {
_SRC_PATH: ('https://chromium.googlesource.com/chromium/src.git', get_chromium_version(),
'DEPS')
}
return root_deps_tree
def _retrieve_remote_files(file_iter):
"""
Retrieves all file paths in file_iter from Google
file_iter is an iterable of strings that are relative UNIX paths to
files in the Chromium source.
Returns a dict of relative UNIX path strings to a list of lines in the file as strings
"""
files = dict()
root_deps_tree = _initialize_deps_tree()
try:
total_files = len(file_iter)
except TypeError:
total_files = None
logger = get_logger()
if total_files is None:
logger.info('Downloading remote files...')
else:
logger.info('Downloading %d remote files...', total_files)
last_progress = 0
file_count = 0
fallback_repo_manager = _FallbackRepoManager()
with _get_requests_session() as download_session:
download_session.stream = False # To ensure connection to Google can be reused
for file_path in file_iter:
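            # Report progress in 5% increments when the total count is known,
            # otherwise log every 20 downloaded files.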
if total_files:
file_count += 1
current_progress = file_count * 100 // total_files // 5 * 5
if current_progress != last_progress:
last_progress = current_progress
logger.info('%d%% downloaded', current_progress)
else:
current_progress = file_count // 20 * 20
if current_progress != last_progress:
last_progress = current_progress
logger.info('%d files downloaded', current_progress)
try:
files[file_path] = _download_source_file(
download_session, root_deps_tree, fallback_repo_manager, file_path).split('\n')
except _NotInRepoError:
get_logger().warning('Could not find "%s" remotely. Skipping...', file_path)
return files
def _retrieve_local_files(file_iter, source_dir):
"""
Retrieves all file paths in file_iter from the local source tree
file_iter is an iterable of strings that are relative UNIX paths to
files in the Chromium source.
Returns a dict of relative UNIX path strings to a list of lines in the file as strings
"""
files = dict()
for file_path in file_iter:
try:
files[file_path] = (source_dir / file_path).read_text().split('\n')
except FileNotFoundError:
get_logger().warning('Missing file from patches: %s', file_path)
if not files:
get_logger().error('All files used by patches are missing!')
return files
def _generate_full_bundle_depends(bundle_path, bundle_cache, unexplored_bundles):
"""
Generates the bundle's and dependencies' dependencies ordered by the deepest dependency first
"""
for dependency_name in reversed(bundle_cache[bundle_path].bundlemeta.depends):
dependency_path = bundle_path.with_name(dependency_name)
if dependency_path in unexplored_bundles:
# Remove the bundle from being explored in _get_patch_trie()
# Since this bundle is a dependency of something else, it must be checked first
# before the dependent
unexplored_bundles.remove(dependency_path)
# First, get all dependencies of the current dependency in order
yield from _generate_full_bundle_depends(dependency_path, bundle_cache, unexplored_bundles)
# Then, add the dependency itself
yield dependency_path
def _get_patch_trie(bundle_cache, target_bundles=None):
"""
Returns a trie of config bundles and their dependencies. It is a dict of the following format:
key: pathlib.Path of config bundle
value: dict of direct dependents of said bundle, in the same format as the surrounding dict.
"""
# Returned trie
patch_trie = dict()
# Set of bundles that are not children of the root node (i.e. not the lowest dependency)
# It is assumed that any bundle that is not used as a lowest dependency will never
# be used as a lowest dependency. This is the case for mixin bundles.
non_root_children = set()
# All bundles that haven't been added to the trie, either as a dependency or
# in this function explicitly
if target_bundles:
unexplored_bundles = set(target_bundles)
else:
unexplored_bundles = set(bundle_cache.keys())
# Construct patch_trie
while unexplored_bundles:
current_path = unexplored_bundles.pop()
current_trie_node = patch_trie # The root node of the trie
# Construct a branch in the patch trie up to the closest dependency
# by using the desired traversal to the config bundle.
# This is essentially a depth-first tree construction algorithm
for dependency_path in _generate_full_bundle_depends(current_path, bundle_cache,
unexplored_bundles):
if current_trie_node != patch_trie:
non_root_children.add(dependency_path)
if not dependency_path in current_trie_node:
current_trie_node[dependency_path] = dict()
# Walk to the child node
current_trie_node = current_trie_node[dependency_path]
# Finally, add the dependency itself as a leaf node of the trie
# If the assertion fails, the algorithm is broken
assert current_path not in current_trie_node
current_trie_node[current_path] = dict()
# Remove non-root node children
for non_root_child in non_root_children.intersection(patch_trie.keys()):
del patch_trie[non_root_child]
# Potential optimization: Check if leaves patch the same files as their parents.
# (i.e. if the set of files patched by the bundle is disjoint from that of the parent bundle)
# If not, move them up to their grandparent, rescan the tree leaves, and repeat
# Then, group leaves and their parents and see if the set of files patched is disjoint from
# that of the grandparents. Repeat this with great-grandparents and increasingly larger
# groupings until all groupings end up including the top-level nodes.
# This optimization saves memory by not needing to store all the patched files of
# a long branch at once.
# However, since the trie for the current structure is quite flat and all bundles are
# quite small (except common, which is by far the largest), this isn't necessary for now.
return patch_trie
def _modify_file_lines(patched_file, file_lines):
"""Helper for _apply_file_unidiff"""
# Cursor for keeping track of the current line during hunk application
# NOTE: The cursor is based on the line list index, not the line number!
line_cursor = None
for hunk in patched_file:
# Validate hunk will match
if not hunk.is_valid():
raise _PatchValidationError('Hunk is not valid: {}'.format(repr(hunk)))
line_cursor = hunk.target_start - 1
for line in hunk:
normalized_line = line.value.rstrip('\n')
if line.is_added:
file_lines[line_cursor:line_cursor] = (normalized_line, )
line_cursor += 1
elif line.is_removed:
if normalized_line != file_lines[line_cursor]:
raise _PatchValidationError(
"Line '{}' does not match removal line '{}' from patch".format(
file_lines[line_cursor], normalized_line))
del file_lines[line_cursor]
elif line.is_context:
if not normalized_line and line_cursor == len(file_lines):
# We reached the end of the file
break
if normalized_line != file_lines[line_cursor]:
raise _PatchValidationError(
"Line '{}' does not match context line '{}' from patch".format(
file_lines[line_cursor], normalized_line))
line_cursor += 1
else:
assert line.line_type in (LINE_TYPE_EMPTY, LINE_TYPE_NO_NEWLINE)
def _apply_file_unidiff(patched_file, child_files, parent_file_layers):
"""Applies the unidiff.PatchedFile to the files at the current file layer"""
patched_file_path = Path(patched_file.path)
if patched_file.is_added_file:
if patched_file_path in child_files:
assert child_files[patched_file_path] is None
assert len(patched_file) == 1 # Should be only one hunk
assert patched_file[0].removed == 0
assert patched_file[0].target_start == 1
child_files[patched_file_path] = [x.value for x in patched_file[0]]
elif patched_file.is_removed_file:
child_files[patched_file_path] = None
else: # Patching an existing file
assert patched_file.is_modified_file
if patched_file_path not in child_files:
child_files[patched_file_path] = parent_file_layers[patched_file_path].copy()
_modify_file_lines(patched_file, child_files[patched_file_path])
def _apply_child_bundle_patches(child_path, had_failure, file_layers, patch_cache, bundle_cache):
"""Helper for _test_patches"""
# Whether the curent patch trie branch failed validation
branch_validation_failed = False
assert child_path in bundle_cache
try:
child_patch_order = bundle_cache[child_path].patch_order
except KeyError:
# No patches in the bundle
pass
else:
patches_outdated = bundle_cache[child_path].bundlemeta.patches_outdated
for patch_path_str in child_patch_order:
for patched_file in patch_cache[patch_path_str]:
try:
_apply_file_unidiff(patched_file, file_layers.maps[0], file_layers.parents)
except _PatchValidationError as exc:
# Branch failed validation; abort
get_logger().warning('Patch failed validation: %s', patch_path_str)
get_logger().debug('Specifically, file "%s" failed validation: %s',
patched_file.path, exc)
branch_validation_failed = True
had_failure = had_failure or not patches_outdated
break
except: #pylint: disable=bare-except
# Branch failed validation; abort
get_logger().warning('Patch failed validation: %s', patch_path_str)
get_logger().debug(
'Specifically, file "%s" caused exception while applying:',
patched_file.path,
exc_info=True)
branch_validation_failed = True
had_failure = had_failure or not patches_outdated
break
if branch_validation_failed:
if patches_outdated:
get_logger().warning('%s is marked with outdated patches. Ignoring failure...',
child_path.name)
break
if branch_validation_failed != patches_outdated:
# Metadata for patch validity is out-of-date
if branch_validation_failed:
get_logger().error(("%s patches have become outdated. "
"Please update the patches, or add 'patches_outdated = true' "
"to its bundlemeta.ini"), child_path.name)
else:
get_logger().error(
('"%s" is no longer out-of-date! '
'Please remove the patches_outdated marking from its bundlemeta.ini'),
child_path.name)
had_failure = True
return had_failure, branch_validation_failed
def _test_patches(patch_trie, bundle_cache, patch_cache, orig_files):
"""
Tests the patches with DFS in the trie of config bundles
Returns a boolean indicating if any of the patches have failed
"""
# Stack of iterables over each node's children
# First, insert iterable over root node's children
node_iter_stack = [iter(patch_trie.items())]
# Stack of files at each node differing from the parent
# The root node thus contains all the files to be patched
file_layers = collections.ChainMap(orig_files)
# Whether any branch had failed validation
had_failure = False
while node_iter_stack:
try:
child_path, grandchildren = next(node_iter_stack[-1])
except StopIteration:
# Finished exploring all children of this node
node_iter_stack.pop()
del file_layers.maps[0]
continue
# Add storage for child's patched files
file_layers = file_layers.new_child()
# Apply children's patches
get_logger().info('Verifying at depth %s: %s ...', len(node_iter_stack), child_path.name)
# Potential optimization: Use interval tree data structure instead of copying
# the entire array to track only diffs
had_failure, branch_validation_failed = _apply_child_bundle_patches(
child_path, had_failure, file_layers, patch_cache, bundle_cache)
if branch_validation_failed:
# Add blank children to force stack to move onto the next branch
node_iter_stack.append(iter(tuple()))
else:
# Explore this child's children
node_iter_stack.append(iter(grandchildren.items()))
return had_failure
def _load_all_patches(bundle_iter, patch_dir=DEFAULT_PATCH_DIR):
"""Returns a dict of relative UNIX path strings to unidiff.PatchSet"""
unidiff_dict = dict()
for bundle in bundle_iter:
try:
patch_order_iter = iter(bundle.patch_order)
except KeyError:
continue
for relative_path in patch_order_iter:
if relative_path in unidiff_dict:
continue
unidiff_dict[relative_path] = unidiff.PatchSet.from_filename(
str(patch_dir / relative_path), encoding=ENCODING)
return unidiff_dict
def _get_required_files(patch_cache):
"""Returns an iterable of pathlib.Path files needed from the source tree for patching"""
new_files = set() # Files introduced by patches
file_set = set()
for patch_set in patch_cache.values():
for patched_file in patch_set:
if patched_file.is_added_file:
new_files.add(patched_file.path)
elif patched_file.path not in new_files:
file_set.add(Path(patched_file.path))
return file_set
def _get_orig_files(args, required_files, parser):
"""
Helper for main to get orig_files
Exits the program if --cache-remote debugging option is used
"""
if args.local:
orig_files = _retrieve_local_files(required_files, args.local)
else: # --remote and --cache-remote
orig_files = _retrieve_remote_files(required_files)
if args.cache_remote:
for file_path, file_content in orig_files.items():
if not (args.cache_remote / file_path).parent.exists():
(args.cache_remote / file_path).parent.mkdir(parents=True)
with (args.cache_remote / file_path).open('w', encoding=ENCODING) as cache_file:
cache_file.write('\n'.join(file_content))
parser.exit()
return orig_files
def main():
"""CLI Entrypoint"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'-b',
'--bundle',
action='append',
type=Path,
metavar='DIRECTORY',
help=('Verify patches for a config bundle. Specify multiple times to '
'verify multiple bundles. Without specifying, all bundles will be verified.'))
parser.add_argument(
'-v', '--verbose', action='store_true', help='Log more information to stdout/stderr')
file_source_group = parser.add_mutually_exclusive_group(required=True)
file_source_group.add_argument(
'-l',
'--local',
type=Path,
metavar='DIRECTORY',
help=
'Use a local source tree. It must be UNMODIFIED, otherwise the results will not be valid.')
file_source_group.add_argument(
'-r',
'--remote',
action='store_true',
help=('Download the required source tree files from Google. '
'This feature requires the Python module "requests". If you do not want to '
'install this, consider using --local instead.'))
file_source_group.add_argument(
'-c',
'--cache-remote',
type=Path,
metavar='DIRECTORY',
help='(For debugging) Store the required remote files in an empty local directory')
args = parser.parse_args()
if args.cache_remote and not args.cache_remote.exists():
if args.cache_remote.parent.exists():
args.cache_remote.mkdir()
else:
parser.error('Parent of cache path {} does not exist'.format(args.cache_remote))
if args.verbose:
get_logger(initial_level=logging.DEBUG, prepend_timestamp=False, log_init=False)
else:
get_logger(initial_level=logging.INFO, prepend_timestamp=False, log_init=False)
if args.bundle:
for bundle_path in args.bundle:
if not bundle_path.exists():
parser.error('Could not find config bundle at: {}'.format(bundle_path))
# Path to bundle -> ConfigBundle without dependencies
bundle_cache = dict(
map(lambda x: (x, ConfigBundle(x, load_depends=False)), _CONFIG_BUNDLES_PATH.iterdir()))
patch_trie = _get_patch_trie(bundle_cache, args.bundle)
patch_cache = _load_all_patches(bundle_cache.values())
required_files = _get_required_files(patch_cache)
orig_files = _get_orig_files(args, required_files, parser)
had_failure = _test_patches(patch_trie, bundle_cache, patch_cache, orig_files)
if had_failure:
get_logger().error('***FAILED VALIDATION; SEE ABOVE***')
if not args.verbose:
get_logger().info('(For more error details, re-run with the "-v" flag)')
parser.exit(status=1)
else:
get_logger().info('Passed validation')
if __name__ == '__main__':
main()
``` |
{
"source": "joubin/opencanary",
"score": 2
} |
#### File: opencanary/modules/mysql.py
```python
from opencanary.modules import CanaryService
from opencanary.config import ConfigException, PY3
from twisted.protocols.policies import TimeoutMixin
from twisted.internet.protocol import Protocol
from twisted.internet.protocol import Factory
from twisted.application import internet
from random import randint
import struct
import re
UINT_MAX = 0xFFFFFFFF
class MySQL(Protocol, TimeoutMixin):
HEADER_LEN = 4
ERR_CODE_ACCESS_DENIED = 1045
ERR_CODE_PKT_ORDER = 1156
SQL_STATE_ACCESS_DENIED = b"2800"
SQL_STATE_PKT_ORDER = b"08S01"
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
def __init__(self, factory):
self._busyReceiving = False
self._buffer = b""
self.factory = factory
self.threadid = factory.next_threadid()
self.setTimeout(10)
@staticmethod
def build_packet(seq_id, data):
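        # MySQL frames every packet as a 3-byte little-endian payload length
        # followed by a 1-byte sequence id, so the payload must fit in 24 bits
        # and the sequence id in 8 bits.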
        length = len(data)
        if length > 0xffffff or length <= 0:
            return None
        if seq_id > 0xff or seq_id < 0:
            return None
        # chop the packed 4-byte int down to the protocol's 3-byte length
        _length = struct.pack('<I', length)[:-1]
_seq_id = struct.pack('B', seq_id)
return _length + _seq_id + data
@staticmethod
def parse_auth(data):
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
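        # Skip the fixed-size prefix of the handshake response: 4 bytes of
        # capability flags, 4 bytes max packet size, 1 byte charset and 23
        # reserved bytes; the NUL-terminated username follows.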
offset = 4 + 4 + 1 + 23
i = data.find(b"\x00", offset)
if i < 0:
return None, None
username = data[offset:i]
i += 1
if PY3:
plen = data[i]
else:
plen = struct.unpack('B', data[i])[0]
i+=1
if plen == 0:
return username, None
        if PY3:
            password = "".join("{:02x}".format(c) for c in data[i:i+plen])
        else:
            password = "".join("{:02x}".format(ord(c)) for c in data[i:i+plen])
return username, password
def consume_packet(self):
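        # Pull one framed packet off the buffer: pad the 3-byte length to four
        # bytes for struct.unpack, then return (sequence id, payload) once the
        # whole payload has arrived.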
if len(self._buffer) < MySQL.HEADER_LEN:
return None, None
length = struct.unpack('<I', self._buffer[:3] + b'\x00')[0]
if PY3:
seq_id = self._buffer[3]
else:
seq_id = struct.unpack('<B', self._buffer[3])[0]
# enough buffer data to consume packet?
if len(self._buffer) < MySQL.HEADER_LEN + length:
return seq_id, None
payload = self._buffer[MySQL.HEADER_LEN: MySQL.HEADER_LEN + length]
self._buffer = self._buffer[MySQL.HEADER_LEN + length:]
return seq_id, payload
def server_greeting(self):
# struct.pack returns a byte string for py2 and py3
_threadid = struct.pack('<I', self.threadid)
# TODO: randomize salts embedded here
data = b'\x0a' + self.factory.canaryservice.banner + b'\x00' + _threadid + b'\x25\x73\x36\x51\x74\x77\x75\x69\x00\xff\xf7\x08\x02\x00\x0f\x80\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x2e\x47\x5c\x78\x67\x7a\x4f\x5b\x5c\x3e\x5c\x39\x00\x6d\x79\x73\x71\x6c\x5f\x6e\x61\x74\x69\x76\x65\x5f\x70\x61\x73\x73\x77\x6f\x72\x64\x00'
return self.build_packet(0x00, data)
def access_denied(self, seq_id, user, password=None):
Y = b"YES" if password else b"NO"
ip = self.transport.getPeer().host
msg = "Access denied for user '%s'@'%s' (using password: %s)" % (user, ip, Y)
return self.error_pkt(seq_id, MySQL.ERR_CODE_ACCESS_DENIED,
MySQL.SQL_STATE_ACCESS_DENIED, msg.encode())
def unordered_pkt(self, seq_id):
msg = "Got packets out of order".encode()
return self.error_pkt(seq_id, MySQL.ERR_CODE_PKT_ORDER,
MySQL.SQL_STATE_PKT_ORDER, msg)
def error_pkt(self, seq_id, err_code, sql_state, msg):
data = b"\xff" + struct.pack("<H", err_code) + b"\x23#" + sql_state + msg
return self.build_packet(0x02, data)
def connectionMade(self):
self.transport.write(self.server_greeting())
def dataReceived(self, data):
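        # Buffer incoming bytes until a full packet arrives; the client login
        # (HandshakeResponse) is expected with sequence id 1, and anything else
        # is answered with a packets-out-of-order error.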
self._buffer += data
self.resetTimeout()
if self._busyReceiving:
return
try:
self._busyReceiving = True
seq_id, payload = self.consume_packet()
if seq_id is None:
return
elif seq_id != 1:
# error on wrong seq_id, even if payload hasn't arrived yet
self.transport.write(self.unordered_pkt(0x01))
self.transport.loseConnection()
return
elif payload is not None:
# seq_id == 1 and payload has arrived
username, password = self.parse_auth(payload)
if username:
logdata = {'USERNAME': username, 'PASSWORD': password}
self.factory.canaryservice.log(logdata, transport=self.transport)
self.transport.write(self.access_denied(0x02, username, password))
self.transport.loseConnection()
finally:
self._busyReceiving = False
def timeoutConnection(self):
self.transport.abortConnection()
class SQLFactory(Factory):
def __init__(self):
self.threadid = randint(0,0x0FFF)
def next_threadid(self):
self.threadid = (self.threadid + randint(1,5)) & UINT_MAX
return self.threadid
def buildProtocol(self, addr):
return MySQL(self)
class CanaryMySQL(CanaryService):
NAME = 'mysql'
def __init__(self, config=None, logger=None):
CanaryService.__init__(self, config=config, logger=logger)
self.port = int(config.getVal("mysql.port", default=3306))
self.banner = config.getVal("mysql.banner", default="5.5.43-0ubuntu0.14.04.1").encode()
self.logtype = logger.LOG_MYSQL_LOGIN_ATTEMPT
self.listen_addr = config.getVal('device.listen_addr', default='')
        if re.search(r'^[3456]\.[-_~.+\w]+$', self.banner.decode()) is None:
            raise ConfigException("mysql.banner", "Invalid MySQL Banner")
def getService(self):
factory = SQLFactory()
factory.canaryservice = self
return internet.TCPServer(self.port, factory, interface=self.listen_addr)
``` |
{
"source": "joubin/Reddit2PDF",
"score": 3
} |
#### File: joubin/Reddit2PDF/GenPDF.py
```python
import os
import pdfkit
from readability.readability import Document
import urllib2, ssl
from urlparse import urlparse
import tldextract
class PDFGenerator(object):
"""docstring for PDFGenerator"""
def __init__(self, path="pdf"):
super(PDFGenerator, self).__init__()
self.path=path
self.options = {
'page-size': 'Letter',
'minimum-font-size': '18',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.25in',
'encoding': "UTF-8",
'no-outline': None,
'quiet': ''
}
if not os.path.exists(path):
os.makedirs(path)
self.opener = urllib2.build_opener()
self.opener.addheaders = [('User-agent', 'Mozilla/5.0')]
self.gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1) # Only for gangstars
def generatePDF(self, url, order):
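        # Fetch the page, let readability reduce it to the main article body
        # and title, then render that cleaned HTML to a numbered PDF file.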
        ext = tldextract.extract(url)
        domain = ext.domain + "." + ext.tld
html = self.opener.open(url).read()
readable_article = Document(html).summary()
readable_title = Document(html).short_title()
self.options["title"] = readable_title
pdfkit.from_string(readable_article, self.path+"/"+str(order)+"-"+domain+"-"+readable_title+'.pdf', options=self.options)
def generateIterator(self, item):
order = 0
for i in item:
if "reddit.com" not in i.url:
order += 1
print i
try:
self.generatePDF(i.url, order)
except Exception, e:
with open("log.log", "a+") as f:
f.write("================\n")
f.write(i.url)
f.write("\n")
f.write(str(e))
f.write("\n")
f.write("================\n")
if __name__ == '__main__':
p = PDFGenerator()
p.generatePDF("http://jabbari.me", 3331) #3331 because I like 3's and prime numbers :D
``` |
{
"source": "joubin/ThreatPlaybook",
"score": 2
} |
#### File: v1/threat_playbook/__init__.py
```python
import os
from sys import exit
from docopt import docopt
from texttable import Texttable
from huepy import *
from models import *
import threat_playbook.ThreatPlaybook
from glob import glob
import json
from pprint import pprint
import yaml
import ntpath
from tinydb import TinyDB, Query
connect('threat_playbook')
module_path = os.path.dirname(threat_playbook.__file__)
rdb = TinyDB(os.path.join(module_path, "repo.json"))
Repo = Query()
def set_project(project_name):
    focus_project = Project.objects(name = project_name).first()
if focus_project:
os.environ['TP_PROJECT_NAME'] = focus_project.name
os.environ['TP_PROJECT_ID'] = str(focus_project.id)
else:
print(bad("There's no project by that name. Please try again"))
def create_new_project(proj_name):
list_of_directories = ['results', 'cases', 'entities']
file_name = '{0}/SecurityTest.robot'.format(proj_name)
for directory in list_of_directories:
directory = '{0}/{1}'.format(proj_name, directory)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isfile(file_name):
file(file_name, 'w').close()
entities_dir = os.path.join(os.getcwd(), "{0}/entities".format(proj_name))
if not os.path.isfile(entities_dir + "/entities_connections.yml"):
file(entities_dir + "/entities_connections.yml", 'w').close()
if not os.path.isfile('.env'):
file('.env', 'w').close()
def get_repo_item(format = 'table'):
all_vuls = rdb.all()
if format == 'table':
table = Texttable()
table.set_cols_align(["l", "c", "c"])
table.set_cols_valign(["m", "m", "m"])
table.add_row(['Name', 'CWE', 'Mitigations'])
for one in all_vuls:
table.add_row([one['name'], one['cwe'], len(one['mitigations'])])
print(table.draw())
elif format == 'json':
        print(json.dumps(all_vuls))
    elif format == 'yaml':
        print(yaml.dump(all_vuls))
    else:
        print(bad('Unrecognized Input. Bye!'))
def print_vul_table(vuls):
table = Texttable()
table.set_cols_align(["l", "c", "c", "c"])
table.set_cols_valign(["m", "m", "m", "m"])
table.add_row(['Name', 'Tool', 'Severity', 'CWE'])
for vul in vuls:
if vul.severity == 3:
sev = 'High'
elif vul.severity == 2:
sev = 'Medium'
else:
sev = 'Low'
table.add_row([vul.name, vul.tool, sev, vul.cwe])
    print(table.draw() + "\n")
def get_vulnerabilities(filter = None, format = 'table'):
    if not os.environ.get('TP_PROJECT_NAME') or not os.environ.get('TP_PROJECT_ID'):
        print(bad("You need to set the project first by using `threat-playbook set-project <projectname>`"))
    latest_session = Session.objects.order_by("-created_on").first() #fetches the latest session
    if filter:
        all_vuls = Vulnerability.objects(session = latest_session, severity = filter).\
            exclude('_id', 'models', 'cases')
    else:
        all_vuls = Vulnerability.objects(session = latest_session)
if format == 'table':
print_vul_table(all_vuls)
elif format == 'json':
pprint(all_vuls.to_json())
else:
print("Unable to understand the input. Exiting...")
def load_reload_repo_db():
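    # Load every YAML file bundled under repo/ into TinyDB, keyed by file name,
    # inserting only entries that are not already present.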
repo_path = os.path.join(module_path, "repo/")
for single in glob(repo_path + "*.yaml"):
single_name = ntpath.basename(single).split('.')[0]
with open(single,'r') as rfile:
rval = rfile.read()
rcon = yaml.safe_load(rval)
has_value = rdb.get(Repo.repo_id == single_name)
if not has_value:
x = {}
x['repo_id'] = single_name
x.update(rcon)
rdb.insert(x)
def main():
arguments = docopt(__doc__, version = 'ThreatPlaybook CLI for v 1.2')
if arguments['new-project']:
if arguments['<projectname>']:
if ' ' in arguments['<projectname>']:
                print(bad('There seems to be whitespace in your project name. Please use only A-Za-z0-9_- characters'))
exit(1)
try:
create_new_project(arguments['<projectname>'])
print(good("Project: {} created successfully. You should have generated boilerplate code".format(arguments['<projectname>'])))
except Exception as e:
print(bad(e))
exit(1)
if arguments['set-project']:
if arguments['<project_name>']:
try:
set_project(arguments['<project_name>'])
print(good("Successfully set Project to {}".format(arguments['<project_name>'])))
except Exception as e:
print(bad(e))
exit(1)
if arguments['show-vulns']:
filter = None
format = 'table'
if arguments['--filter']:
filter = arguments['--filter']
if arguments['--format']:
format = arguments['--format']
try:
get_vulnerabilities(filter, format)
except Exception as e:
print(e)
exit(1)
if arguments['show-repo']:
if arguments['--repoformat']:
try:
get_repo_item(arguments['--repoformat'])
except Exception as e:
print(bad(e))
exit(1)
else:
try:
get_repo_item()
except Exception as e:
print(bad(e))
exit(1)
if arguments['reload-repo']:
try:
print(good("Attempting to reload repo DB"))
load_reload_repo_db()
except Exception as e:
print(bad(e))
exit(1)
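# Invocation sketch (hedged: the exact usage text lives in the module's
# docopt docstring, which is not shown here; the subcommand names below
# are taken from the argument checks above):
#   threat-playbook new-project <projectname>
#   threat-playbook set-project <project_name>
#   threat-playbook show-vulns --filter 3 --format json
#   threat-playbook show-repo --repoformat yaml
#   threat-playbook reload-repo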
```
#### File: v1/threat_playbook/utils.py
```python
from models import *
import json
from base64 import b64encode
from schema import Schema, And, Use, Optional
import lxml.etree as xml
import re
import sys
vul_schema = Schema(
{
'cwe': And(Use(int)),
'name': basestring,
'tool': basestring,
'severity': And(Use(int), lambda n: 0 <= n <= 3),
'description': basestring,
'target_name': basestring,
Optional('observation'): basestring,
Optional('remediation'): basestring,
},
ignore_extra_keys=False
)
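# A minimal dict accepted by vul_schema (illustrative values only;
# ignore_extra_keys=False means any unknown key would be rejected):
#   {'cwe': 89, 'name': 'SQL Injection', 'tool': 'zap', 'severity': 3,
#    'description': 'user input reaches a query', 'target_name': 'webapp'}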
def parse_zap_json_file(zap_file, target, session, uri):
with open(zap_file, 'r') as zapfile:
zap_data = json.loads(zapfile.read())
alerts = None
pre_alerts = zap_data['Report']['Sites']
if isinstance(pre_alerts, list):
for pre in pre_alerts:
if uri in pre['Host']:
alerts = pre
if isinstance(pre_alerts, dict):
alerts = pre_alerts
    alerts = alerts['Alerts']['AlertItem'] if alerts else None
if alerts:
if isinstance(alerts, dict):
alerts = [alerts]
if isinstance(alerts, list):
for alert in alerts:
vul = Vulnerability()
vul.tool = 'zap'
vul.target = target
vul.name = alert['Alert']
vul.severity = {
'High': 3,
'Medium': 2,
'Low': 1
}.get(alert['RiskDesc'], 0)
vul.description = alert['Desc']
vul.cwe = alert['CWEID']
vul.remediation = alert['Solution']
vul.evidences += _map_alert_to_evidence(alert)
all_linked_models = ThreatModel.objects(cwe=alert['CWEID'])
second_link_models = ThreatModel.objects(related_cwes=alert['CWEID'])
                if len(all_linked_models) > 0:
                    vul.models.extend([model.id for model in all_linked_models])
                if len(second_link_models) > 0:
                    vul.models.extend([model.id for model in second_link_models])
vul.session = session
vul.target = target
vul.save()
else:
raise Exception('Unable to parse alerts in report.')
def pp_json(file_content):
return json.dumps(json.loads(file_content), indent=4, sort_keys=True)
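# e.g. pp_json('{"b": 1, "a": 2}') sorts the keys and indents:
#   '{\n    "a": 2,\n    "b": 1\n}'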
def manage_nikto_xml_file(xml_file):
try:
nreport = xml.parse(xml_file)
root_elem = nreport.getroot()
scans = root_elem.findall('niktoscan/scandetails')
report_content = ''
for scan in scans:
report_content += _format_scan_result(scan)
return report_content
except BaseException as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
print('Error: {0} {1}'.format(e, exc_traceback.tb_lineno))
def manage_testssl_json_file(json_file):
try:
with open(json_file, 'r') as testssl_json:
testssl_report = json.loads(testssl_json.read())
req_key = [
'Invocation',
'at',
'version',
'openssl',
'startTime',
'scanTime'
]
header = ''
for key in req_key:
header += '{lbl}: {v}\n'.format(lbl=key.capitalize(), v=str(testssl_report.get(key, '')))
header += '\n\n'
scan_results_str = ''
scan_results = testssl_report['scanResult']
for target in scan_results:
scan_results_str += _format_target(target)
return header + scan_results_str
except BaseException as e:
print('Unable to parse JSON File')
exc_type, exc_value, exc_traceback = sys.exc_info()
print('Error: {0} {1}'.format(e, exc_traceback.tb_lineno))
def manage_recon_results(recon_file, tool):
content = ""
if tool in ['nmap', 'wfuzz']:
with open(recon_file, 'r') as plainfile:
content = plainfile.read()
elif tool in ['sslyze', 'shodan']:
with open(recon_file, 'r') as json_file:
content = pp_json(json_file.read())
elif tool == 'nikto':
content = manage_nikto_xml_file(recon_file)
elif tool == 'testssl':
content = manage_testssl_json_file(recon_file)
return content
def manage_nodejsscan_results(nfile, target, session):
results = json.load(open(nfile, 'r'))
default_dict = {}
for vul_type, vul_list in results.get('sec_issues').items():
for individual_vul_details in vul_list:
title = individual_vul_details.get('title', '')
default_dict[title] = {
'description': individual_vul_details.get('description'),
'evidences': []
}
evidence = {'url': individual_vul_details.get('filename'),
'name': 'File: {0}, Line no: {1}'.format(individual_vul_details.get('path'),
individual_vul_details.get('line')),
'log': individual_vul_details.get('lines')}
default_dict[title]['evidences'] = [evidence]
for individual_vul_title, individual_vul_detail in default_dict.items():
vul_dict = Vulnerability()
vul_dict.name = individual_vul_title
vul_dict.tool = 'NodeJsScan'
vul_dict.severity = 2
vul_dict.target = target
vul_dict.description = individual_vul_detail['description']
all_evidences = individual_vul_detail['evidences']
vul_evidences = []
if len(all_evidences) > 0:
for single_evidence in all_evidences:
vul_evid = VulnerabilityEvidence()
vul_evid.url = single_evidence.get('url', '')
vul_evid.name = single_evidence.get('name', '')
vul_evid.log = b64encode(single_evidence.get('log', ''))
vul_evidences.append(vul_evid)
vul_dict.evidences = vul_evidences
vul_dict.session = session
vul_dict.save()
def manage_bandit_results(json_file, target, session):
results = json.load(open(json_file, 'r'))
default_dict = {}
sev_con_dict = {
'LOW': 1,
'MEDIUM': 2,
'HIGH': 3
}
for vul_result in results.get('results', []):
default_dict[(vul_result.get('test_id'))] = {'description': '', 'evidences': [], 'vul_name': '',
'confidence': 2, 'severity': 2}
evidence = {
'url': vul_result.get('filename', ''),
'name': 'File :{0}, Line no:{1}'.format(vul_result.get('filename', ''), vul_result.get('line_number', '')),
'log': vul_result.get('code', '')
}
test_id = vul_result.get('test_id')
default_dict[test_id]['description'] = vul_result.get('issue_text', '')
default_dict[test_id]['evidences'].append(evidence)
default_dict[test_id]['vul_name'] = vul_result.get('test_name', '')
default_dict[test_id]['confidence'] = sev_con_dict.get(
vul_result.get('issue_confidence', 'MEDIUM'))
default_dict[test_id]['severity'] = sev_con_dict.get(
vul_result.get('issue_severity', 'MEDIUM'))
for individual_vul_id, individual_vul_detail in default_dict.items():
vul_dict = Vulnerability()
vul_dict.name = individual_vul_detail.get('vul_name', ''),
vul_dict.tool = 'Bandit'
vul_dict.severity = individual_vul_detail.get('severity', 2),
vul_dict.target = target
vul_dict.description = individual_vul_detail.get('description', '')
all_evidences = individual_vul_detail.get('evidences', [])
vul_evidences = []
if all_evidences:
for single_evidence in all_evidences:
vul_evid = VulnerabilityEvidence()
vul_evid.url = single_evidence.get('url', '')
vul_evid.name = single_evidence.get('name', '')
vul_evid.log = b64encode(single_evidence.get('log', ''))
vul_evidences.append(vul_evid)
vul_dict.evidences = vul_evidences
vul_dict.session = session
vul_dict.save()
def manage_brakeman_results(json_file, target, session):
confidence_dict = {
"High": 3,
"Medium": 2,
"Low": 1
}
with open(json_file) as data_file:
data = json.load(data_file)
vuls = data.get('warnings', [])
default_dict = {}
for vul in vuls:
vul_name = vul.get('warning_type', 'Unknown')
if vul_name not in default_dict:
default_dict[vul_name] = {
'description': vul.get('message', ''),
'severity': confidence_dict.get(vul.get('confidence', 'Low'), 1),
'evids': []
}
filename = vul.get('file', '')
line_num = vul.get('line', '')
location = vul.get('location', {})
code = ''
if location is not None:
for key, value in location.items():
code += '{0} - {1} \n'.format(key, value)
code += 'code - {0}'.format(vul.get('code', '') or '')
evid_desc = 'File :{0}, Line :{1}'.format(filename, line_num)
default_dict[vul_name]['evids'].append(
{
'url': filename,
'name': evid_desc,
'log': code
}
)
for name, data in default_dict.items():
vul_dict = Vulnerability()
vul_dict.name = name,
vul_dict.tool = 'Brakeman'
vul_dict.severity = data.get('severity', 1),
vul_dict.target = target
vul_dict.description = data.get('description', '')
all_evidences = data.get('evids', [])
vul_evidences = []
if len(all_evidences) > 0:
for single_evidence in all_evidences:
vul_evid = VulnerabilityEvidence()
vul_evid.url = single_evidence.get('url', "")
vul_evid.name = single_evidence.get('name', "")
vul_evid.log = b64encode(single_evidence.get("log", ""))
vul_evidences.append(vul_evid)
vul_dict.evidences = vul_evidences
vul_dict.session = session
vul_dict.save()
def manage_burp_xml_file(xml_file, target, session, uri):
try:
nreport = xml.parse(xml_file)
root_elem = nreport.getroot()
reg_path = r'issue/name'
uniq_objs = root_elem.xpath(reg_path)
vuls = set([i.text for i in uniq_objs])
p = '{0}[text() = $name]'.format(reg_path)
severity_dict = {
'Information': 0,
'Low': 1,
'Medium': 2,
'High': 3
}
for v in vuls:
obj = root_elem.xpath(p, name=v)
# url_param_list = []
all_evidences = []
parent_obj = None
for u in obj:
parent_obj = u.getparent()
req = parent_obj.find('requestresponse/request')
res = parent_obj.find('requestresponse/response')
request = response = b64encode('')
if req is not None:
                    is_base64_encoded = req.get('base64') == 'true'
if is_base64_encoded:
request = req.text.decode('base64')
else:
request = req.text
                if res is not None:
                    is_base64_encoded = res.get('base64') == 'true'
                    if is_base64_encoded:
                        response = res.text.decode('base64')
                    else:
                        response = res.text
log = b64encode(request + '\n\n\n' + response.split('\r\n\r\n')[0] + '\n\n')
url = '%s%s' % (parent_obj.findtext('host', default=target), parent_obj.findtext('path', default=''))
all_evidences.append({
'url': url,
'log': log,
'param': parent_obj.findtext('location', default=''),
'attack': parent_obj.findtext('issueDetail', default=''),
'evidence': request + '\n\n\n' + response.split('\r\n\r\n')[0]
})
            # parent_obj holds the element visited last; its issue-level
            # fields are shared across all instances of this vulnerability
            if parent_obj is None:
                continue
vul_name = parent_obj.findtext('name', default='')
severity = parent_obj.findtext('severity', '')
if severity:
severity = severity_dict.get(severity)
desc = parent_obj.findtext('issueBackground', default='')
solution = parent_obj.findtext('remediationBackground', default='')
vul_dict = Vulnerability()
vul_dict.name = re.sub('<[^<]+?>', '', vul_name)
vul_dict.tool = "Burp"
vul_dict.severity = severity
vul_dict.description = re.sub('<[^<]+?>', '', desc)
vul_dict.observation = '' # TODO: Is there really nothing else to put in here?
vul_dict.remediation = re.sub('<[^<]+?>', '', solution)
vul_dict.target = target
vul_evidences = []
for single_evidence in all_evidences:
vul_evid = VulnerabilityEvidence()
vul_evid.url = single_evidence.get('url', '')
vul_evid.log = single_evidence.get('log', '')
vul_evid.param = single_evidence.get('param', '')
vul_evid.attack = single_evidence.get('attack', '')
vul_evid.evidence = single_evidence.get('evidence', '')
print(vul_evid)
vul_evidences.append(vul_evid)
vul_dict.evidences = vul_evidences
vul_dict.session = session
vul_dict.save()
except BaseException as e:
print('Unable to parse XML File')
exc_type, exc_value, exc_traceback = sys.exc_info()
print('Error: {0} {1}'.format(e, exc_traceback.tb_lineno))
def manage_npm_audit_file(json_file, target, session):
results = json.load(open(json_file, 'r'))
all_advisories = results.get('advisories')
severity_dict = {
'moderate': 2,
'low': 1,
'critical': 3
}
default_dict = {}
for key, advisory in all_advisories.items():
title = advisory.get('title')
default_dict[title] = {
'description': '',
'evidences': [],
'remediation': '',
'severity': 2
}
evidences = []
for finding in advisory.get('findings'):
for path in finding.get('paths'):
evidence = {
'url': advisory.get('module_name'),
'name': 'File: {0}'.format(path)
}
evidences.append(evidence)
default_dict[title]['evidences'] = evidences
default_dict[title]['description'] = advisory.get('overview')
        cwe_ref = advisory.get('cwe') or ''
        default_dict[title]['cwe'] = int(cwe_ref.split('-')[-1]) if cwe_ref else None
default_dict[title]['remediation'] = advisory.get('recommendation')
default_dict[title]['severity'] = severity_dict.get(advisory.get('severity'), 2)
for individual_vul_title, individual_vul_detail in default_dict.items():
vul_dict = Vulnerability()
vul_dict.name = individual_vul_title
vul_dict.tool = 'Npm Audit'
vul_dict.severity = individual_vul_detail['severity']
vul_dict.target = target
vul_dict.cwe = individual_vul_detail.get('cwe', None)
vul_dict.description = individual_vul_detail['description']
vul_dict.remediation = individual_vul_detail['remediation']
all_evidences = individual_vul_detail['evidences']
vul_evidences = []
for single_evidence in all_evidences:
vul_evid = VulnerabilityEvidence()
vul_evid.url = single_evidence['url']
vul_evid.name = single_evidence['name']
            vul_evidences.append(vul_evid)
        vul_dict.evidences = vul_evidences
        vul_dict.session = session
        vul_dict.save()
saved_vul = Vulnerability.objects.get(id = vul_dict.id)
all_linked_models = ThreatModel.objects(cwe=individual_vul_detail.get('cwe'))
second_link_models = ThreatModel.objects(related_cwes=individual_vul_detail.get('cwe'))
        if len(all_linked_models) > 0:
            saved_vul.models.extend([model.id for model in all_linked_models])
        if len(second_link_models) > 0:
            saved_vul.models.extend([model.id for model in second_link_models])
saved_vul.save()
def _map_alert_to_evidence(alert):
"""
Maps a list of alerts to a collection of evidence objects
:param list|dict alert: Alert dictionary or list of alert dictionaries
:return: list(VulnerabilityEvidence) Evidence collection translated from alerts
"""
evidence_collection = []
include_log = True
key_map = {
'URI': 'url',
'Param': 'param',
'Attack': 'attack',
'Evidence': 'evidence',
'OtherInfo': 'other_info',
}
if isinstance(alert['Item'], dict):
alert['Item'] = [alert['Item']]
include_log = False
for item in alert['Item']:
evidence = VulnerabilityEvidence()
# Copy alert data into evidence object
for ak, ek in key_map.items():
setattr(evidence, ek, item.get(ak))
# Log is only included if we got a list of alerts
evidence.log = b64encode(
'{0}\n{1}\n\n{2}'.format(
item.get('RequestHeader', ''),
item.get('RequestBody', ''),
item.get('ResponseHeader', '').encode('UTF-8')
)
) if include_log else None
evidence_collection.append(evidence)
return evidence_collection
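# Mapping sketch (hypothetical ZAP alert item):
#   {'URI': 'http://host/page', 'Param': 'id', 'Attack': "' OR 1=1--",
#    'Evidence': '...', 'OtherInfo': '...'}
# becomes a VulnerabilityEvidence with url, param, attack, evidence and
# other_info filled from the corresponding keys.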
def _format_scan_result(scan):
"""
Formats a scan result into a text block
:param xml.Element scan: A scan result
:return: str A text representation of the result
"""
target_map = {
'Target IP': 'targetip',
'Target Hostname': 'targethostname',
'Target Port': 'targetport',
'HTTP Server': 'targetbanner',
'Start Time': 'starttime',
'Site Link(name)': 'sitename',
'Site Link (IP)': 'siteip',
}
target = ''
for lbl, k in target_map.items():
target += '{0}: {1}\n'.format(lbl, scan.get(k, ''))
target += '\n'
    for it in scan.findall('item'):
        target += _format_item(it)
    return target
def _format_item(item):
"""
Formats an item as attached to
:param xml.Element item: XML-Element to extract data from
:return: str String representation of item details
"""
return '''URI: {uri}
HTTP Method: {method}
Description: {description}
Test Link: {namelink}
\t\t\t{iplink}
OSVDB Link: {osvdblink}
'''.format(
uri=item.findtext('uri', default=''),
method=item.get('method', default=''),
description=item.findtext('description', default=''),
namelink=item.findtext('namelink', default=''),
iplink=item.findtext('iplink', default=''),
osvdblink=item.get('osvdblink', default='')
)
def _format_target(target):
"""
Formats a target into a text block
:param xml.Element target: A scan result
:return: str String representation of the result
"""
target_map = {
'Target IP': 'targetip',
        'Target Host': 'targetHost',
'IP': 'ip',
'Port': 'port',
'Service': 'service',
}
target_str = 'Target Info:\n\n'
for lbl, k in target_map.items():
target_str += '{0}: {1}\n'.format(lbl, target.get(k, ''))
target_str += '\n'
target_str += _format_key_data(target)
return target_str
def _format_key_data(target):
"""
Formats key data
:param dict target: Data dictionary to be formatted into a string
:return: str String representation of target
"""
key_data = [
'protocols',
'grease',
'ciphers',
'pfs',
'serverPreferences',
'serverDefaults',
'headerResponse',
'cipherTests',
'browserSimulations'
]
key_details = {
'ID': 'id',
'Severity': 'severity',
'CVE': 'cve',
'CWE': 'cwe',
'Finding': 'finding'
}
report = ''
for key in key_data:
results = target.get(key)
        if not results:
            continue
report += key.capitalize() + ': ' + '\n\n'
for result in results:
for lbl, k in key_details.items():
report += '{lbl}: {r}\n'.format(lbl=lbl, r=result.get(k, ''))
report += '\n\n'
return report
``` |
{
"source": "joubin/VeloParse",
"score": 3
} |
#### File: VeloParse/Model/FireData.py
```python
from Model.Laser import Laser
from Parser.Parse import next_bytes, get_int_value
class FireData(object):
"""
Based on Appendix B
http://velodynelidar.com/docs/manuals/63-9113%20HDL-32E%20manual_Rev%20H_Sept.pdf
    FireData has the block_id and rotation along with an array of 32 @Laser objects
"""
def __init__(self, block_id, rotation, lasers):
"""
:param block_id: the actual id
:param rotation: the rotational angle
:param lasers: an array of 32 @Laser objects
:return:
"""
self.block_id = block_id
self.rotation = rotation
self.lasers = lasers
def __str__(self) -> str:
"""
:return: a human readable string
"""
result = "block id: " + str(self.block_id) \
+ " rotation: " + str(self.rotation) \
+ " lasers:[ "
for i in self.lasers:
result += " { "
result += str(i)
result += " } "
result += " ] "
return result
def __repr__(self, *args, **kwargs):
"""
calls the __str__ so that it prints correctly within the list
:param args:
:param kwargs:
:return:
"""
return self.__str__()
@classmethod
def create_with_date(cls, data) -> object:
"""
:param data: is the raw 1200 bytes of the udp packet
:return: a constructed class
"""
try:
data, rotation = next_bytes(data, 2)
data, block_id = next_bytes(data, 2)
# print(rotation)
rotation = get_int_value(rotation)
block_id = get_int_value(block_id)
# rotation = str(rotation)
# print(block_id)
lasers = []
for i in range(32):
data, distance = next_bytes(data, 2)
data, intensity = next_bytes(data, 1)
tmp = Laser.create_from_hex_data(distance, intensity)
lasers.append(tmp)
return cls(block_id, rotation, lasers)
        except ValueError:
            return None
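# Usage sketch (assumption, per the manual cited above: an HDL-32E data
# packet carries 1200 payload bytes holding 12 consecutive 100-byte
# firing blocks):
#
#   def parse_payload(payload):
#       blocks = []
#       for i in range(12):
#           block = FireData.create_with_date(payload[i * 100:(i + 1) * 100])
#           if block is not None:
#               blocks.append(block)
#       return blocks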
``` |
{
"source": "Jouca/Horse-Game",
"score": 2
} |
#### File: Horse-Game/ressources/paul.py
```python
import pygame
try:
from constant import COLOR, PLAYER_COLORS, PLATEAU_PLAYERS_COORDINATES
except ModuleNotFoundError:
from .constant import COLOR, PLAYER_COLORS, PLATEAU_PLAYERS_COORDINATES
dico_plato = {
"0": (22, 330), "1": (22, 286), "2": (66, 286),
"3": (110, 286), "4": (154, 286), "5": (198, 286),
"6": (242, 286), "7": (286, 286), "8": (286, 242),
"9": (286, 198), "10": (286, 154), "11": (286, 110),
"12": (286, 66), "13": (286, 22), "14": (330, 22),
"15": (374, 22), "16": (374, 66), "17": (374, 110),
"18": (374, 154), "19": (374, 198), "20": (374, 242),
"21": (374, 286), "22": (418, 286), "23": (462, 286),
"24": (506, 286), "25": (550, 286), "26": (594, 286),
"27": (638, 286), "28": (638, 330), "29": (638, 374),
"30": (594, 374), "31": (550, 374), "32": (506, 374),
"33": (462, 374), "34": (418, 374), "35": (374, 374),
"36": (374, 418), "37": (374, 462), "38": (374, 506),
"39": (374, 550), "40": (374, 594), "41": (374, 638),
"42": (330, 638), "43": (286, 638), "44": (286, 594),
"45": (286, 550), "46": (286, 506), "47": (286, 462),
"48": (286, 418), "49": (286, 374), "50": (242, 374),
"51": (198, 374), "52": (154, 374), "53": (110, 374),
"54": (66, 374), "55": (22, 374),
"1_yellow": (66, 330), "2_yellow": (110, 330), "3_yellow": (154, 330),
"4_yellow": (198, 330), "5_yellow": (242, 330), "6_yellow": (286, 330),
"1_blue": (330, 66), "2_blue": (330, 110), "3_blue": (330, 154),
"4_blue": (330, 198), "5_blue": (330, 242), "6_blue": (330, 286),
"1_red": (594, 330), "2_red": (550, 330), "3_red": (506, 330),
"4_red": (462, 330), "5_red": (418, 330), "6_red": (374, 330),
"1_green": (330, 594), "2_green": (330, 550), "3_green": (330, 506),
"4_green": (330, 462), "5_green": (330, 418), "6_green": (330, 374),}
def show_table(window):
plateau = pygame.Surface((660, 660))
frame = pygame.Surface((308, 352))
    for index, nom_couleur in enumerate(PLAYER_COLORS):
        coord_x, coord_y, rotate = PLATEAU_PLAYERS_COORDINATES[index]
        for value in range(7):
            rect_x = pygame.Rect(value * 44, 264, 44, 44)
            rect_y = pygame.Rect(264, value * 44, 44, 44)
            pygame.draw.circle(frame, COLOR[nom_couleur], rect_x.center, rect_x.width // 2)
            pygame.draw.circle(frame, COLOR[nom_couleur], rect_y.center, rect_y.width // 2)
        rect_final = pygame.Rect(0, 308, 44, 44)
        pygame.draw.circle(frame, COLOR[nom_couleur], rect_final.center, rect_final.width // 2)
        rect_stable = pygame.Rect(0, 0, 264, 264)
        pygame.draw.rect(frame, COLOR["LIGHT_" + nom_couleur], rect_stable)
        pygame.draw.rect(frame, COLOR[nom_couleur], rect_stable, 10)
        for value in range(1, 7):
            rect_case = pygame.Rect(44 * value, 308, 44, 44)
            pygame.draw.rect(frame, COLOR["LIGHT_" + nom_couleur], rect_case)
            pygame.draw.rect(frame, COLOR["BLACK"], rect_case, 2)
font = pygame.font.Font(None, 40)
text = font.render(str(value), True, COLOR["BLACK"])
text_rect = text.get_rect(center=rect_case.center)
frame.blit(text, text_rect)
plateau.blit(pygame.transform.rotate(frame, rotate), (coord_x, coord_y))
    for case in dico_plato.values():
        pygame.draw.rect(plateau, COLOR["BLACK"], (case[0], case[1], 22, 22))
return plateau
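# Usage sketch (assuming a display of at least 660x660 pixels):
#   window = pygame.display.set_mode((660, 660))
#   window.blit(show_table(window), (0, 0))
#   pygame.display.flip()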
``` |
{
"source": "Jouca/Mastermind",
"score": 3
} |
#### File: Jouca/Mastermind/main.py
```python
import random
import pygame
from ressources.classes import Spritesheet, Button, Grille, Parallax
from ressources.classes import Selection, charger_ressource
# Colors
couleur = {
"black": pygame.Color(0, 0, 0),
"white": pygame.Color(255, 255, 255),
"red": pygame.Color(255, 0, 0),
"green": pygame.Color(0, 255, 0),
"blue": pygame.Color(0, 0, 255),
"yellow": pygame.Color(255, 255, 0),
"darkgray": pygame.Color(169, 169, 169),
"cyan": pygame.Color(0, 255, 255),
"brown": pygame.Color(153, 85, 51),
"dark_brown": pygame.Color(90, 54, 20),
"gold": pygame.Color(255, 200, 0)
}
# Color identifiers
couleur_mastermind_ID = {
1: "red",
2: "blue",
3: "green",
4: "yellow",
5: "white",
6: "black"
}
screen_size = (1280, 720)
def init_screen():
"""
    Initialize the pygame settings.
"""
pygame.init()
pygame.mixer.pre_init(44100, -16, 2, 1024)
pygame.mixer.music.set_volume(0.7)
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Mastermind")
pygame.display.set_icon(charger_ressource("/sprites/mastermind_logo.png"))
return screen
def init_fonts():
"""
    Initialize the fonts.
"""
fonts = {
"big_generalfont": pygame.font.Font(
"./ressources/polices/PetMe64.ttf",
60
),
"normal_generalfont": pygame.font.Font(
"./ressources/polices/PetMe64.ttf",
40
),
"littlenormal_generalfont": pygame.font.Font(
"./ressources/polices/PetMe64.ttf",
35
),
"big_pusab": pygame.font.Font(
"./ressources/polices/Pusab.ttf",
60
),
"normal_pusab": pygame.font.Font(
"./ressources/polices/Pusab.ttf",
40
),
"littlenormal_pusab": pygame.font.Font(
"./ressources/polices/Pusab.ttf",
30
),
"littlebignormal_pusab": pygame.font.Font(
"./ressources/polices/Pusab.ttf",
35
),
"determination_font": pygame.font.Font(
"./ressources/polices/DeterminationMono.ttf",
60
),
"little_determination_font": pygame.font.Font(
"./ressources/polices/DeterminationMono.ttf",
30
),
"littlebig_determination_font": pygame.font.Font(
"./ressources/polices/DeterminationMono.ttf",
40
),
"littleshort_determination_font": pygame.font.Font(
"./ressources/polices/DeterminationMono.ttf",
36
),
}
return fonts
def init_texts(fonts):
"""
    Initialize the game texts.
"""
texts = {
"spamton_meme": fonts["determination_font"].render(
"SPAMTON DANCE",
True,
couleur["white"]
),
"alphalaneous": fonts["littlenormal_pusab"].render(
"Alphalaneous",
True,
couleur["gold"]
),
"alphalaneous_desc1": fonts["normal_pusab"].render(
"Pour certaines",
True,
couleur["gold"]
),
"alphalaneous_desc2": fonts["normal_pusab"].render(
"textures du jeu",
True,
couleur["gold"]
),
"hiroshi": fonts["littlenormal_pusab"].render(
"Hiroshi",
True,
couleur["gold"]
),
"hiroshi_desc1": fonts["normal_pusab"].render(
"Pour le logo",
True,
couleur["gold"]
),
"hiroshi_desc2": fonts["normal_pusab"].render(
"du jeu",
True,
couleur["gold"]
),
"robtopgames": fonts["littlenormal_pusab"].render(
"RobTop Games",
True,
couleur["gold"]
),
"robtopgames_desc1": fonts["normal_pusab"].render(
"Createur des",
True,
couleur["gold"]
),
"robtopgames_desc2": fonts["normal_pusab"].render(
"textures du jeu",
True,
couleur["gold"]
),
"jouca": fonts["littlenormal_pusab"].render(
"Jouca / Diego",
True,
couleur["gold"]
),
"jouca_desc1": fonts["normal_pusab"].render(
"Le developpeur",
True,
couleur["gold"]
),
"jouca_desc2": fonts["normal_pusab"].render(
"du jeu",
True,
couleur["gold"]
),
"select_tries": fonts["big_pusab"].render(
"Sélectionner le nombre d'essaies",
True,
couleur["gold"]
),
"select_difficulty": fonts["big_pusab"].render(
"Sélectionner la difficulté",
True,
couleur["gold"]
),
"ok_info": fonts["littlebignormal_pusab"].render(
": couleur bien placée",
True,
couleur["gold"]
),
"position_issue_info1": fonts["littlebignormal_pusab"].render(
": couleur présente",
True,
couleur["gold"]
),
"position_issue_info2": fonts["littlebignormal_pusab"].render(
"mais mal placée",
True,
couleur["gold"]
),
}
return texts
def init_buttons():
"""
    Initialize the game buttons.
"""
buttons = {
"credits": Button(
charger_ressource("./sprites/info.png"),
[1220, 660, 50, 50]
),
"returnMenu": Button(
charger_ressource("./sprites/cross.png"),
[20, 20, 50, 50]
),
"returnMenuJeu": Button(
charger_ressource("./sprites/cross.png"),
[1200, 20, 50, 50]
),
"returnMenuQuit": Button(
charger_ressource("./sprites/cross.png"),
[20, 20, 50, 50]
),
"play": Button(
charger_ressource("./sprites/play.png"),
[530, 280, 220, 220]
),
"10_tries": Button(
charger_ressource("./sprites/10_essaies.png"),
[400, 280, 220, 220]
),
"12_tries": Button(
charger_ressource("./sprites/12_essaies.png"),
[680, 280, 220, 220]
),
"facile": Button(
charger_ressource("./sprites/facile.png"),
[280, 280, 220, 220]
),
"normal": Button(
charger_ressource("./sprites/normal.png"),
[540, 280, 220, 220]
),
"difficile": Button(
charger_ressource("./sprites/difficile.png"),
[800, 280, 220, 220]
),
}
return buttons
def spamton_dance(var):
"""
    Handle Spamton's dance animation.
"""
var["spamton_sprite"] = (var["spamton_sprite"] + 1) % 20
value = str(var["spamton_sprite"])
sprite = var["spamton_gif"].parse_sprite(f"spamton_troll{value}.png")
return var, sprite
def explosion(var):
"""
    Run the explosion animation.
"""
explosion_sound = pygame.mixer.Sound("./ressources/sounds/explosion.ogg")
if var["explosion_fini"] == 0:
var["explosion_delay"] = (var["explosion_delay"] + 1) % 2
if var["explosion_delay"] == 1:
var["explosion_sprite"] = (var["explosion_sprite"] + 1) % 17
if var["explosion_sprite"] == 1:
explosion_sound.play()
value = str(var["explosion_sprite"])
sprite = var["explosion_gif"].parse_sprite(f"explosion{value}.png")
if var["explosion_sprite"] == 16:
var["explosion_fini"] = 1
else:
value = str(var["explosion_sprite"])
sprite = var["explosion_gif"].parse_sprite(f"explosion{value}.png")
return var, sprite
def affichage_menu(menu, var, screen, clock):
"""
    Handle the drawing of the menus.
"""
if menu == "principal":
var = affichage_menu_principal(screen, var)
elif menu == "select_tries":
var = affichage_menu_select_tries(screen, var)
elif menu == "select_difficulty":
var = affichage_menu_select_difficulty(screen, var)
elif menu == "jeu":
var = affichage_menu_jeu(screen, var)
elif menu == "credits":
var = affichage_menu_credits(screen, var)
elif menu == "secret_spamton":
var = affichage_menu_spamton(screen, var)
if var["debug"] == 1:
screen.blit(update_fps(clock, var), (1220, 600))
pygame.display.flip()
return var
def affichage_menu_principal(screen, var):
"""
    Display the main menu.
"""
var["background_parallax"].draw(screen)
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/mastermind_titre.png"),
(838, 133)
),
(220, 80)
)
code = var["fonts"]["determination_font"].render(
var["secret_code"],
True,
couleur["green"]
)
terminal = var["fonts"]["determination_font"].render(
">",
True,
couleur["green"]
)
screen.blit(terminal, (30, 650))
screen.blit(code, (70, 650))
var["buttons"]["credits"].draw(screen)
var["buttons"]["play"].draw(screen)
var["buttons"]["returnMenuQuit"].draw(screen)
return var
def affichage_menu_select_tries(screen, var):
"""
    Display the tries-selection menu.
"""
var["background_parallax"].draw(screen)
screen.blit(var["texts"]["select_tries"], (120, 160))
var["buttons"]["10_tries"].draw(screen)
var["buttons"]["12_tries"].draw(screen)
var["buttons"]["returnMenu"].draw(screen)
return var
def affichage_menu_select_difficulty(screen, var):
"""
    Display the difficulty-selection menu.
"""
var["background_parallax"].draw(screen)
screen.blit(var["texts"]["select_difficulty"], (220, 160))
var["buttons"]["facile"].draw(screen)
var["buttons"]["normal"].draw(screen)
var["buttons"]["difficile"].draw(screen)
var["buttons"]["returnMenu"].draw(screen)
return var
def affichage_menu_credits(screen, var):
"""
    Display the credits menu.
"""
background = pygame.transform.scale(
charger_ressource("/sprites/background1.png"),
(1280, 1280)
)
screen.blit(background, (0, 0))
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/credits.png"),
(482, 123)
),
(400, 40)
)
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/alphalaneous.png"),
(150, 150)
),
(100, 210)
)
screen.blit(var["texts"]["alphalaneous"], (60, 370))
screen.blit(var["texts"]["alphalaneous_desc1"], (270, 270))
screen.blit(var["texts"]["alphalaneous_desc2"], (270, 300))
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/hiroshi.png"),
(150, 150)
),
(100, 410)
)
screen.blit(var["texts"]["hiroshi"], (115, 570))
screen.blit(var["texts"]["hiroshi_desc1"], (270, 470))
screen.blit(var["texts"]["hiroshi_desc2"], (270, 500))
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/robtopgames.png"),
(150, 150)
),
(700, 210)
)
screen.blit(var["texts"]["robtopgames"], (660, 370))
screen.blit(var["texts"]["robtopgames_desc1"], (870, 270))
screen.blit(var["texts"]["robtopgames_desc2"], (870, 300))
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/jouca.png"),
(150, 150)
),
(700, 410)
)
screen.blit(var["texts"]["jouca"], (670, 570))
screen.blit(var["texts"]["jouca_desc1"], (870, 470))
screen.blit(var["texts"]["jouca_desc2"], (870, 500))
var["buttons"]["returnMenu"].draw(screen)
return var
def affichage_menu_spamton(screen, var):
"""
    Display Spamton's secret menu.
"""
screen.fill(couleur["black"])
var, sprite = spamton_dance(var)
sprite = pygame.transform.scale(sprite, (400, 400))
screen.blit(sprite, (450, 30))
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/spamton_angel.png"),
(150, 150)
),
(1000, 80)
)
screen.blit(
pygame.transform.flip(
pygame.transform.scale(
charger_ressource("/sprites/spamton_angel.png"),
(150, 150)
),
True,
False
),
(150, 80)
)
pygame.draw.rect(
screen,
couleur["white"],
pygame.Rect(400, 25, 500, 500),
2
)
screen.blit(var["texts"]["spamton_meme"], (485, 450))
var["buttons"]["returnMenu"].draw(screen)
return var
def affichage_menu_jeu(screen, var):
"""
    Display the game.
"""
var["background_jeu_parallax"].draw(screen)
if var["explosion_sprite"] < 12:
var["grille"].draw(screen, var)
pygame.draw.rect(
screen,
couleur["dark_brown"],
pygame.Rect(750, 110, 500, 300)
)
pygame.draw.rect(
screen,
couleur["brown"],
pygame.Rect(755, 115, 490, 290)
)
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/checkmark.png"),
(55, 55)
),
(765, 215)
)
screen.blit(var["texts"]["ok_info"], (830, 230))
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/info.png"),
(55, 55)
),
(765, 300)
)
screen.blit(var["texts"]["position_issue_info1"], (830, 315))
screen.blit(var["texts"]["position_issue_info2"], (870, 345))
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/mastermind_titre.png"),
(440, 72)
),
(775, 120)
)
var["selection"].draw(screen)
if var["gagné"] == 1:
var["selection"].draw_win(screen, var)
var["selection"].draw_checker(var)
        # Build the score surface
score_surface_2 = pygame.Surface((205, 110))
score_surface_2.fill(couleur["dark_brown"])
score_surface = pygame.Surface((200, 100))
score_surface.fill(couleur["brown"])
screen.blit(score_surface_2, (550, 280))
        # Render the score onto the surface
score = var["fonts"]["littlebig_determination_font"].render(
f"Score : {var['score']}",
1,
couleur["gold"]
)
text_rect = score.get_rect(center=score_surface.get_rect().center)
score_surface.blit(score, (text_rect.x, text_rect.y))
screen.blit(score_surface, (555, 285))
elif var["explosion_sprite"] == 16:
pygame.draw.rect(
screen,
couleur["dark_brown"],
pygame.Rect(205, 235, 860, 310)
)
pygame.draw.rect(
screen,
couleur["brown"],
pygame.Rect(210, 240, 850, 300)
)
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/gameover.png"),
(752, 134)
),
(250, 270)
)
score = var["fonts"]["determination_font"].render(
f"Score : {var['score']}",
1,
couleur["gold"]
)
text_rect = score.get_rect(center=(var["width"]/2, 450))
screen.blit(score, text_rect)
if var["perdu"] != 1 and var["gagné"] != 1:
var["selection"].draw_buttons(screen)
var["buttons"]["returnMenuJeu"].draw(screen)
if var["perdu"] == 1 and var["explosion_fini"] == 0:
var, sprite = explosion(var)
if sprite is not None:
sprite = pygame.transform.scale(sprite, (700, 700))
screen.blit(sprite, (-150, -150))
screen.blit(sprite, (-150, 150))
screen.blit(sprite, (300, -150))
screen.blit(sprite, (650, 250))
screen.blit(sprite, (700, -100))
if var["debug"] == 1:
text = var["fonts"]["little_determination_font"].render(
str(var["code"]),
True,
couleur["red"]
)
text_rect = text.get_rect()
setattr(text_rect, "topright", (1280, 680))
screen.blit(text, text_rect)
return var
def generer_jeu(var):
"""
    Generate the data for a new game round.
"""
var = generer_code(var)
var["grille"] = Grille(var["nb_couleurs"], var["essaies"], var)
var["selection"] = Selection(var)
var["background_jeu_parallax"] = Parallax(
f"background{random.randint(1, 12)}.png",
4,
False
)
return var
def secret_codes(var, menu):
"""
    Handle the secret codes of the main menu.
"""
if var["secret_code"] == "SPAMTON":
menu = "secret_spamton"
var["secret_code"] = ""
pygame.mixer.music.load("./ressources/sounds/spamton.ogg")
pygame.mixer.music.play(-1)
elif var["secret_code"] == "SUS":
var["secret_code"] = ""
sus = pygame.mixer.Sound("./ressources/sounds/sus.ogg")
sus.play()
elif var["secret_code"] == "GODMODE":
var["secret_code"] = ""
if var["godmode"] == 0:
var["godmode"] = 1
else:
var["godmode"] = 0
elif var["secret_code"] == "GASTER":
var["jeu_en_cours"] = False
elif var["secret_code"] == "DEBUG":
var["secret_code"] = ""
if var["debug"] == 0:
var["debug"] = 1
else:
var["debug"] = 0
elif len(var["secret_code"]) >= 10:
var["secret_code"] = ""
return var, menu
def generer_code(var):
"""
    Generate the color code from the given number of colors.
    """
    for _ in range(var["nb_couleurs"]):
        var["code"].append(
            couleur_mastermind_ID[random.randint(1, var["nb_couleurs"])]
        )
return var
def controles_principal(menu, var, event):
"""
    Handle the controls of the main menu.
"""
if (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
) or var["buttons"]["returnMenu"].event_handler(event):
var["jeu_en_cours"] = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_BACKSPACE:
var["secret_code"] = var["secret_code"][0:-1]
else:
var["secret_code"] += (event.unicode).upper()
var, menu = secret_codes(var, menu)
elif var["buttons"]["credits"].event_handler(event):
var["secret_code"] = ""
menu = "credits"
elif var["buttons"]["play"].event_handler(event):
var["secret_code"] = ""
menu = "select_difficulty"
return menu, var
def controles_select_tries(menu, var, event):
"""
    Handle the controls of the tries-selection menu.
"""
if (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
) or var["buttons"]["returnMenu"].event_handler(event):
menu = "select_difficulty"
elif var["buttons"]["10_tries"].event_handler(event):
menu = "jeu"
var["essaies"] = 10
var = generer_jeu(var)
elif var["buttons"]["12_tries"].event_handler(event):
var["essaies"] = 12
menu = "jeu"
var = generer_jeu(var)
return menu, var
def controles_select_difficulty(menu, var, event):
"""
    Handle the controls of the difficulty-selection menu.
"""
if (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
) or var["buttons"]["returnMenu"].event_handler(event):
menu = "principal"
elif var["buttons"]["facile"].event_handler(event):
var["nb_couleurs"] = 4
menu = "select_tries"
elif var["buttons"]["normal"].event_handler(event):
var["nb_couleurs"] = 5
menu = "select_tries"
elif var["buttons"]["difficile"].event_handler(event):
var["nb_couleurs"] = 6
menu = "select_tries"
return menu, var
def controles_credits(menu, var, event):
"""
    Handle the controls of the credits menu.
"""
if (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
) or var["buttons"]["returnMenu"].event_handler(event):
menu = "principal"
return menu, var
def controles_spamton(menu, var, event):
"""
    Handle the controls of Spamton's secret menu.
"""
if (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
) or var["buttons"]["returnMenu"].event_handler(event):
pygame.mixer.music.stop()
menu = "principal"
return menu, var
def controles_jeu(menu, var, event):
"""
    Handle the game controls.
"""
if (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
) or var["buttons"]["returnMenuJeu"].event_handler(event):
var["perdu"] = 0
var["gagné"] = 0
var["grille"] = ""
var["explosion_fini"] = 0
var["explosion_sprite"] = 0
var["code"] = []
var["table"] = []
var["score"] = 0
var["essaies"] = 0
var["triche_ensemble"] = []
menu = "principal"
return menu, var
elif var["selection"]._rejouer.event_handler(event) and var["gagné"] == 1:
var["perdu"] = 0
var["gagné"] = 0
var["grille"] = ""
var["explosion_fini"] = 0
var["explosion_sprite"] = 0
var["code"] = []
var["table"] = []
var["triche_ensemble"] = []
var = generer_jeu(var)
menu = "jeu"
return menu, var
if var["perdu"] != 1 and var["gagné"] != 1:
for i in range(var["nb_couleurs"]):
if var["selection"]._buttons_haut[i].event_handler(event):
var["selection"].change_couleur_haut(i)
elif var["selection"]._buttons_bas[i].event_handler(event):
var["selection"].change_couleur_bas(i)
if var["selection"]._valider.event_handler(event) or (
event.type == pygame.KEYDOWN and event.key == pygame.K_t
):
var["selection"].valider(var["grille"])
var["selection"].check_validation(var["code"])
validation_liste = var["selection"]._validation_liste
if validation_liste["OK"] == var["nb_couleurs"]:
var["gagné"] = 1
var["score"] += 1
victory = pygame.mixer.Sound("./ressources/sounds/victory.ogg")
victory.play()
elif 0 not in var["grille"]._table[-1]:
var["perdu"] = 1
return menu, var
def controles(var, menu):
"""
    Dispatch events to the control handler of the current menu.
"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
var["jeu_en_cours"] = False
if menu == "principal":
menu, var = controles_principal(menu, var, event)
elif menu == "select_tries":
menu, var = controles_select_tries(menu, var, event)
elif menu == "select_difficulty":
menu, var = controles_select_difficulty(menu, var, event)
elif menu == "jeu":
menu, var = controles_jeu(menu, var, event)
elif menu == "credits":
menu, var = controles_credits(menu, var, event)
elif menu == "secret_spamton":
menu, var = controles_spamton(menu, var, event)
return menu, var
def update_fps(clock, var):
"""
    Render the current FPS as a text surface.
"""
fps = str(int(clock.get_fps()))
fps_text = var["fonts"]["determination_font"].render(
fps,
1,
couleur["blue"]
)
return fps_text
def main():
"""
    Main function launching the game.
"""
screen = init_screen()
menu = "principal"
var = {
"width": screen_size[0],
"heigth": screen_size[1],
"jeu_en_cours": True,
"fonts": init_fonts(),
"texts": init_texts(init_fonts()),
"buttons": init_buttons(),
"background_parallax": Parallax("background1.png", 4, False),
"background_jeu_parallax": Parallax("background1.png", 4, False),
"essaies": 0,
"nb_couleurs": 4,
"selection": "",
"grille": "",
"code": [],
"table": [],
"checker": [],
"score": 0,
"perdu": 0,
"gagné": 0,
"explosion_gif": Spritesheet('./ressources/sprites/explosion.png'),
"explosion_sprite": 0,
"explosion_delay": 0,
"explosion_fini": 0,
"secret_code": "",
"spamton_gif": Spritesheet('./ressources/sprites/spamton_troll.png'),
"spamton_sprite": 0,
"godmode": 0,
"debug": 0,
"triche_ensemble": []
}
clock = pygame.time.Clock()
frame_per_second = 60
while var["jeu_en_cours"]:
clock.tick(frame_per_second)
var = affichage_menu(menu, var, screen, clock)
menu, var = controles(var, menu)
pygame.quit()
# --------------- STARTUP --------------- #
main()
```
#### File: Mastermind/ressources/classes.py
```python
import json
from collections import Counter

import pygame
# Colors
couleur = {
"black": pygame.Color(0, 0, 0),
"white": pygame.Color(255, 255, 255),
"red": pygame.Color(255, 0, 0),
"green": pygame.Color(0, 255, 0),
"blue": pygame.Color(0, 0, 255),
"yellow": pygame.Color(255, 255, 0),
"darkgray": pygame.Color(169, 169, 169),
"cyan": pygame.Color(0, 255, 255),
"brown": pygame.Color(153, 85, 51),
"dark_brown": pygame.Color(90, 54, 20),
"gold": pygame.Color(255, 200, 0)
}
couleur_mastermind_ID = {
1: "red",
2: "blue",
3: "green",
4: "yellow",
5: "white",
6: "black"
}
def charger_ressource(path):
"""
    Load an image resource of the game.
>>> charger_ressource("/sprites/info.png")
"""
ressource = pygame.image.load(f"./ressources{path}")
return ressource
class Spritesheet:
"""
    Object handling a spritesheet file.
"""
def __init__(self, filename):
self.filename = filename
self.sprite_sheet = pygame.image.load(filename).convert()
self.meta_data = self.filename.replace('png', 'json')
        with open(self.meta_data, encoding="utf-8") as fichier:
            self.data = json.load(fichier)
def get_sprite(self, x_position, y_position, width, heigth):
"""
        Get the sprite from its x position, y position, width and height.
"""
sprite = pygame.Surface((width, heigth))
sprite.set_colorkey((0, 0, 0))
sprite.blit(
self.sprite_sheet,
(0, 0),
(x_position, y_position, width, heigth)
)
return sprite
def parse_sprite(self, name):
"""
        Build the sprite image from its name.
"""
sprite = self.data['frames'][name]['frame']
x_position, y_position = sprite["x"], sprite["y"]
w_position, h_position = sprite["w"], sprite["h"]
image = self.get_sprite(x_position, y_position, w_position, h_position)
return image
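# The accompanying JSON is assumed to follow the TexturePacker-style
# layout read by parse_sprite above, e.g.:
#   {"frames": {"explosion0.png": {"frame": {"x": 0, "y": 0, "w": 30, "h": 30}}}}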
class Button:
"""
    Object handling a button.
"""
def __init__(self, sprite, rect):
self._sprite = sprite
self._x = rect[0]
self._y = rect[1]
self._width = rect[2]
self._heigth = rect[3]
self._rect = pygame.Rect(rect[0], rect[1], rect[2], rect[3])
def draw(self, screen):
"""
        Draw the button on a surface.
"""
rect_value = (self._width, self._heigth)
sprite = pygame.transform.scale(self._sprite, rect_value)
screen.blit(sprite, (self._x, self._y))
def event_handler(self, event):
"""
        Detect whether the player left-clicked the button.
"""
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if self._rect.collidepoint(event.pos):
return True
return False
class Parallax:
"""
    Object handling the animation of a scrolling background.
"""
def __init__(self, sprite, speed, flip=False):
self._sprite = sprite
self._speed = speed
self._bgs_width_position = [0, 1280]
background = charger_ressource(f"/sprites/{sprite}")
if flip is not False:
self._background1 = pygame.transform.scale(background, (1280, 720))
flipped_bg = pygame.transform.scale(background, (1280, 720))
self._background2 = pygame.transform.flip(flipped_bg, True, False)
else:
self._background1 = pygame.transform.scale(background, (1280, 720))
self._background2 = pygame.transform.scale(background, (1280, 720))
def draw(self, screen):
"""
        Draw the background on the surface.
"""
self._bgs_width_position[0] -= self._speed
self._bgs_width_position[1] -= self._speed
if self._bgs_width_position[0] <= -1280:
self._bgs_width_position[0] = 1280
elif self._bgs_width_position[1] <= -1280:
self._bgs_width_position[1] = 1280
screen.blit(self._background1, (self._bgs_width_position[0], 0))
screen.blit(self._background2, (self._bgs_width_position[1], 0))
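        # The two copies start 1280 px apart and scroll at the same speed;
        # when one passes -1280 it wraps back to +1280, so the pair tiles
        # the screen without a visible seam.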
class Grille:
"""
    Object handling the grid.
"""
def __init__(self, xdef, ydef, var):
self._xdef = xdef
self._ydef = ydef
self._table = [
[
0 for i in range(var["nb_couleurs"])
]
for j in range(var["essaies"])
]
self._checker = [
[
"X" for i in range(2)
]
for j in range(var["essaies"])
]
self._limx, self._limy = 375, 600
self._blocktaille, self._rects, self._rects_background = 100000, [], []
        # Derive the largest square cell that fits inside both limits
        self._blocktaille = min(self._limx // self._xdef, self._limy // self._ydef)
        x_position = self._xdef * self._blocktaille
        y_position = self._ydef * self._blocktaille
        # Fill the rect lists
debut_x = (self._limx - self._blocktaille * xdef) / 2
debut_y = (self._limy - self._blocktaille * ydef) / 2
for x1_position in range(0, y_position, self._blocktaille):
rect_selected = []
rect_background = []
for y1_position in range(0, x_position, self._blocktaille):
rect = pygame.Rect(
y1_position+debut_y+50,
x1_position+debut_x,
self._blocktaille - 5,
self._blocktaille - 5
)
rect_cote = pygame.Rect(
(y1_position+debut_y+50) - 3,
(x1_position+debut_x) - 3,
self._blocktaille + 2,
self._blocktaille + 2
)
rect_selected.append(rect)
rect_background.append(rect_cote)
self._rects.append(rect_selected)
self._rects_background.append(rect_background)
def draw(self, screen, var):
"""
        Draw the grid on a surface.
"""
        # place the game cells
for i in enumerate(self._table):
for k in enumerate(self._table[i[0]]):
pygame.draw.rect(
screen,
couleur["dark_brown"],
self._rects_background[i[0]][k[0]]
)
if self._table[i[0]][k[0]] == 0:
pygame.draw.rect(
screen,
couleur["brown"],
self._rects[i[0]][k[0]]
)
else:
pygame.draw.rect(
screen,
couleur[self._table[i[0]][k[0]]],
self._rects[i[0]][k[0]]
)
        # Place the checker values
x_space = self._rects[0][-1].width
x_position = self._rects[0][-1].x + x_space + 20
y_position = self._rects[0][-1].y
for i in enumerate(self._checker):
for k in enumerate(self._checker[i[0]]):
if self._checker[i[0]][k[0]] != "X":
valuechecker = var["fonts"]["determination_font"].render(
f"{self._checker[i[0]][k[0]]}",
1,
couleur["gold"]
)
screen.blit(valuechecker, (x_position, y_position))
x_position += x_space
x_position = self._rects[0][-1].x + x_space + 20
y_position = y_position + (
self._rects[1][-1].y - self._rects[0][-1].y
)
        # Place the two icons labelling the checker information
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/checkmark.png"),
(35, 35)
),
(x_position - 5, y_position + 5)
)
screen.blit(
pygame.transform.scale(
charger_ressource("/sprites/info.png"),
(35, 35)
),
((x_position + x_space) - 5, y_position + 5)
)
def put_colors(self, data):
"""
        Put a row of colors on the first row of the grid that still
        has an empty cell.
        """
        for index, row in enumerate(self._table):
            if 0 in row:
                self._table[index] = data
                return
def put_checker(self, data):
"""
        Put a value into the first free slot of the checker.
        """
        for index, row in enumerate(self._checker):
            if "X" in row:
                self._checker[index] = data
                return
class Selection:
"""
    Object handling the color selection.
"""
def __init__(self, var):
self._nb_couleurs = var["nb_couleurs"]
self._colors = []
self._grille = var["grille"]
couleurs_id_liste = list(couleur_mastermind_ID.values())
self._couleurs = couleurs_id_liste[:var["nb_couleurs"]]
self._selection, self._buttons_haut = [], []
self._buttons_bas, self._validation_liste = [], {
"OK": 0,
"POSITION_ISSUE": 0
}
self._button_const_position = (759, 430)
x_position, y_position = self._button_const_position
xcons = x_position
while x_position <= xcons + (60 * self._nb_couleurs-1):
self._selection.append(self._couleurs)
self._buttons_haut.append(
Button(
charger_ressource("./sprites/haut.png"),
[x_position, y_position, 50, 50]
)
)
x_position += 60
x_position -= (60 * self._nb_couleurs)
y_position += 120
xcons = x_position
while x_position <= xcons + (60 * self._nb_couleurs-1):
self._buttons_bas.append(
Button(
charger_ressource("./sprites/bas.png"),
[x_position, y_position, 50, 50]
)
)
x_position += 60
self._surface = pygame.Surface((63 * len(self._buttons_haut), 265))
self._surface.fill(couleur["dark_brown"])
self._surface_temp = pygame.Surface(
(60 * len(self._buttons_haut), 255)
)
self._surface_temp.fill(couleur["brown"])
self._surface.blit(self._surface_temp, (5, 5))
self._surface_validation = pygame.Surface(
(
245,
160
)
)
self._surface_validation.fill(couleur["dark_brown"])
surface_temp_validation = pygame.Surface(
(
250,
150
)
)
surface_temp_validation.fill(couleur["brown"])
self._surface_validation.blit(surface_temp_validation, (5, 5))
self._surface_validation.blit(
pygame.transform.scale(
charger_ressource("/sprites/checker.png"),
(236, 57)
),
(4, 15)
)
self._checker_const_position = (12, 80)
x_position, y_position = self._checker_const_position
self._surface_validation.blit(
pygame.transform.scale(
charger_ressource("/sprites/checkmark.png"),
(45, 45)
),
(x_position+10, y_position)
)
self._surface_validation.blit(
pygame.transform.scale(
charger_ressource("/sprites/info.png"),
(45, 45)
),
(x_position+115, y_position)
)
self._valider = Button(
charger_ressource("./sprites/valider.png"),
[
60 * len(self._buttons_haut) + 610,
615,
140,
50
]
)
self._rejouer = Button(
charger_ressource("./sprites/rejouer.png"),
[
60 * len(self._buttons_haut) + 610,
615,
140,
50,
]
)
def draw_buttons(self, screen):
"""
        Draw the color-selection buttons on a surface.
"""
x_position, y_position = self._button_const_position
for i in range(self._nb_couleurs):
self._buttons_haut[i].draw(screen)
self._buttons_bas[i].draw(screen)
pygame.draw.rect(
screen,
couleur[self._selection[i][0]],
(x_position + 2, y_position + 60, 45, 45)
)
x_position += 60
self._valider.draw(screen)
def draw_win(self, screen, var):
"""
        Draw the surface telling the user they have won.
"""
wintext = var["fonts"]["littleshort_determination_font"].render(
"Félicitations !",
1,
couleur["gold"]
)
text_rect = wintext.get_rect(center=self._surface.get_rect().center)
self._surface.blit(wintext, (text_rect.x, text_rect.y))
self._rejouer.draw(screen)
def draw_checker(self, var):
"""
        Draw the checker verification of the colors on the surface.
"""
position_issue_count = self._validation_liste["POSITION_ISSUE"]
ok_count = self._validation_liste["OK"]
pygame.draw.rect(
self._surface_validation,
couleur["brown"],
pygame.Rect(83, 72, 30, 50)
)
pygame.draw.rect(
self._surface_validation,
couleur["brown"],
pygame.Rect(189, 72, 30, 50)
)
ok_text = var["fonts"]["determination_font"].render(
str(ok_count),
1,
couleur["gold"]
)
position_issue_text = var["fonts"]["determination_font"].render(
str(position_issue_count),
1,
couleur["gold"]
)
self._surface_validation.blit(ok_text, (85, 72))
self._surface_validation.blit(position_issue_text, (190, 72))
def draw(self, screen):
"""
        Draw the selection tools on a surface.
"""
screen.blit(self._surface, (750, 420))
screen.blit(self._surface_validation, (510, 110))
def change_couleur_haut(self, index):
"""
        Cycle the color of a given selection slot from right to left.
"""
test = self._selection[index].copy()
test.append(test.pop(0))
self._selection[index] = test
def change_couleur_bas(self, index):
"""
        Cycle the color of a given selection slot from left to right.
"""
test = self._selection[index].copy()
value = test.pop(-1)
test.insert(0, value)
self._selection[index] = test
def valider(self, grille):
"""
        Process the selection once the player clicks the "Valider" button.
"""
self._colors = []
for i in enumerate(self._selection):
self._colors.append(self._selection[i[0]][0])
grille.put_colors(self._colors)
def check_validation(self, code):
"""
Permet de regarder et de traiter les réponses de la
sélection avec le code généré.
"""
validation_templist = {"OK": 0, "POSITION_ISSUE": 0}
test_code = code.copy()
position_issue_liste = []
for i in enumerate(self._colors):
if self._colors[i[0]] == test_code[i[0]]:
validation_templist["OK"] += 1
else:
position_issue_liste.append(i[0])
for i in enumerate(position_issue_liste):
wrong_place = False
for j in enumerate(position_issue_liste):
if self._colors[i[1]] == test_code[j[1]]:
wrong_place = True
if wrong_place:
validation_templist["POSITION_ISSUE"] += 1
self._validation_liste = validation_templist
self._grille.put_checker(
[
self._validation_liste["OK"],
self._validation_liste["POSITION_ISSUE"]
]
)
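    # Worked example of the feedback rule (sketch):
    #   code  = ['red', 'blue', 'green', 'yellow']
    #   guess = ['red', 'green', 'blue', 'blue']
    #   -> OK = 1 (the leading red) and POSITION_ISSUE = 2 (green and one
    #      of the two blues exist in the code but on other cells)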
def change_selection(self, color):
"""
        Change the color selection until it matches a given code.
"""
index = 0
while index != self._nb_couleurs:
for i in range(self._nb_couleurs):
if self._selection[index][0] == color[index]:
index += 1
break
else:
self.change_couleur_haut(index)
``` |
{
"source": "Jouca/Tetris",
"score": 3
} |
#### File: Tetris/modules/diego.py
```python
import json
import pygame
import requests
try:
from constant import LANG
except ModuleNotFoundError:
from modules.constant import LANG
class GameStrings:
"""
    Class handling the game's strings.
"""
def __init__(self, language="FR"):
with open(
f"./others/game_string/{language}/game_strings.json",
"r", encoding="utf-8"
) as fichier:
self.data = json.load(fichier)
def get_string(self, key):
"""
        Get a string from its key.
"""
return self.data[key]
def get_all_strings(self):
"""
        Get all the strings.
"""
return self.data
game_strings = GameStrings(LANG)
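# Usage sketch (hypothetical key; the real keys live in game_strings.json):
#   game_strings.get_string("score")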
class Spritesheet:
"""classe s'occupant d'un fichier type spritesheet."""
def __init__(self, filename):
self.filename = filename
self.sprite_sheet = pygame.image.load(filename).convert()
self.meta_data = self.filename.replace('png', 'json')
        with open(self.meta_data, encoding="utf-8") as fichier:
            self.data = json.load(fichier)
def get_sprite(self, x_position, y_position, width, heigth):
"""
        Get the sprite from its x position, y position, width and height.
"""
sprite = pygame.Surface((width, heigth))
sprite.set_colorkey((0, 0, 0))
sprite.blit(
self.sprite_sheet,
(0, 0),
(x_position, y_position, width, heigth)
)
return sprite
def parse_sprite(self, name):
"""
        Build the sprite image from its name.
"""
sprite = self.data['frames'][name]['frame']
x_position, y_position = sprite["x"], sprite["y"]
w_position, h_position = sprite["w"], sprite["h"]
image = self.get_sprite(
x_position,
y_position,
w_position,
h_position
)
return image
def clear_lines(content):
"""supprime les lignes remplies (valeur autre que 0) d'un
tableau `content`, les décale vers le bas, le tableau `content`
conserve le format initial, même nombre de lignes et de colonnes.
La fonction renvoie ce nouveau tableau ainsi que le nombre de
lignes supprimées.
>>> content = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
>>> clear_lines(content)
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
3"""
nb_line_clear = 0
copy_content = content.copy()
for i in range(len(content)):
if copy_content[i].count(0) == 0:
copy_content.pop(i)
copy_content.insert(0, [0] * len(content[0]))
nb_line_clear += 1
return copy_content, nb_line_clear
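# A minimal sketch of how clear_lines behaves (illustrative grid, not from the
# original project): a single full row collapses into an empty row on top.
#
#     grid = [[1, 1, 1],
#             [0, 1, 0]]
#     new_grid, cleared = clear_lines(grid)
#     # new_grid == [[0, 0, 0], [0, 1, 0]], cleared == 1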
def post_request(url, data=None, headers=None):
"""
    Sends a POST request to `url` with the payload `data`
    and optional `headers`.
"""
try:
response = requests.post(url, data=data, headers=headers, timeout=10)
return response.text, response.status_code
except requests.exceptions.ConnectionError:
return game_strings.get_string("error"), 0
def read_json(filename):
"""
Reads a JSON file and returns its content as a dict.
"""
with open(filename, encoding="utf-8") as fichier:
data = json.load(fichier)
return dict(data)
def insert_local_score(score):
"""
Inserts a score into the `data.json` save file.
"""
data = read_json("./others/game_save/data.json")
for i in data:
if score >= int(data[i]):
data[i] = str(score).zfill(8)
break
with open("./others/game_save/data.json", "w", encoding="utf-8") as fichier:
json.dump(data, fichier, indent=4)
``` |
{
"source": "jouir/mining-companion",
"score": 3
} |
#### File: mining-companion/companion/coingecko.py
```python
import logging
import requests
logger = logging.getLogger(__name__)
def get_rate(ids, vs_currencies):
logger.debug(f'getting {ids} price in {vs_currencies} on coingecko')
url = f'https://api.coingecko.com/api/v3/simple/price?ids={ids}&vs_currencies={vs_currencies}'
r = requests.get(url)
r.raise_for_status()
logger.debug(r.json())
return r.json()[ids.lower()][vs_currencies.lower()]
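# A minimal usage sketch (hypothetical values; performs a live HTTP request):
#
#     rate = get_rate('ethereum', 'usd')
#     # rate is a float such as 1234.56, read from CoinGecko's simple price API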
```
#### File: mining-companion/companion/config.py
```python
import json
import os
from jsonschema import validate
absolute_path = os.path.split(os.path.abspath(__file__))[0]
class InvalidConfigException(Exception):
pass
def read_config(filename=None):
if filename and os.path.isfile(filename):
with open(filename, 'r') as fd:
return json.load(fd)
else:
return {}
def validate_config(config):
if config is None:
raise InvalidConfigException('config is not a dict')
with open(os.path.join(absolute_path, 'config.schema.json'), 'r') as fd:
schema = json.loads(fd.read())
validate(instance=config, schema=schema)
```
#### File: companion/pools/ethermine.py
```python
import logging
from datetime import datetime
from ethermine import Ethermine
from humanfriendly import format_timespan
from pools import Handler
from utils import convert_fiat, format_weis
logger = logging.getLogger(__name__)
eth = Ethermine()
class Miner:
def __init__(self, address, exchange_rate=None, currency=None):
self.address = address
self.raw_balance = self.get_unpaid_balance(address)
self.balance = format_weis(self.raw_balance)
self.balance_fiat = None
if exchange_rate and currency:
self.balance_fiat = convert_fiat(amount=self.raw_balance, exchange_rate=exchange_rate, currency=currency)
payout_threshold = self.get_payout_threshold(address)
self.balance_percentage = self.format_balance_percentage(payout_threshold=payout_threshold,
balance=self.raw_balance)
self.transactions = self.get_payouts(address, exchange_rate, currency)
@staticmethod
def get_unpaid_balance(address):
dashboard = eth.miner_dashboard(address)
return dashboard['currentStatistics']['unpaid']
@staticmethod
def get_payouts(address, exchange_rate=None, currency=None):
payouts = eth.miner_payouts(address)
# convert to transactions
transactions = []
for payout in payouts:
transaction = Transaction(txid=payout['txHash'], timestamp=payout['paidOn'], amount=payout['amount'],
duration=payout['end']-payout['start'], exchange_rate=exchange_rate,
currency=currency)
transactions.append(transaction)
# sort by older timestamp first
return sorted(transactions)
@staticmethod
def get_payout_threshold(address):
return eth.miner_settings(address)['minPayout']
@staticmethod
def format_balance_percentage(payout_threshold, balance):
return f'{round(balance*100/payout_threshold, 2)}%'
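    # Illustrative values: format_balance_percentage(payout_threshold=10**17,
    # balance=5 * 10**16) returns '50.0%'.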
@property
def url(self):
return f'https://ethermine.org/miners/{self.address}/dashboard'
@property
def last_transaction(self):
if self.transactions:
return self.transactions[-1]
def __repr__(self):
attributes = {'balance': self.balance, 'raw_balance': self.raw_balance,
'balance_percentage': self.balance_percentage, 'url': self.url}
if self.balance_fiat:
attributes['balance_fiat'] = self.balance_fiat
formatted_attributes = ' '.join([f'{k}="{v}"' for k, v in attributes.items()])
return f'<Miner #{self.address} ({formatted_attributes})>'
class Transaction:
def __init__(self, txid, amount, timestamp, duration, exchange_rate=None, currency=None):
self.txid = txid
self.time = datetime.fromtimestamp(timestamp)
self.raw_amount = amount
self.amount = format_weis(amount)
self.amount_fiat = None
self.duration = format_timespan(duration)
if exchange_rate and currency:
self.amount_fiat = convert_fiat(amount=self.raw_amount, exchange_rate=exchange_rate, currency=currency)
def __lt__(self, trx):
"""Order by datetime asc"""
return self.time < trx.time
def __eq__(self, trx):
return self.txid == trx.txid
def __repr__(self):
attributes = {'time': self.time, 'amount': self.amount, 'raw_amount': self.raw_amount,
'duration': self.duration}
if self.amount_fiat:
attributes['amount_fiat'] = self.amount_fiat
formatted_attributes = ' '.join([f'{k}="{v}"' for k, v in attributes.items()])
return f'<Transaction #{self.txid} ({formatted_attributes})>'
class EthermineHandler(Handler):
def __init__(self, exchange_rate=None, currency=None, notifier=None, pool_name='ethermine'):
super().__init__(pool_name=pool_name, exchange_rate=exchange_rate, currency=currency, notifier=notifier)
def watch_blocks(self, last_block=None):
logger.debug('not implemented yet')
def watch_miner(self, address, last_balance=None, last_transaction=None):
logger.debug(f'watching miner {address}')
try:
miner = Miner(address=address, exchange_rate=self.exchange_rate, currency=self.currency)
except Exception as err:
logger.error(f'miner {address} not found')
logger.exception(err)
return
logger.debug(miner)
last_balance = self._watch_miner_balance(miner=miner, last_balance=last_balance)
last_transaction = self._watch_miner_payments(miner=miner, last_transaction=last_transaction)
return last_balance, last_transaction
```
#### File: mining-companion/companion/telegram.py
```python
import logging
import os
from copy import copy
import requests
from jinja2 import Environment, FileSystemLoader
logger = logging.getLogger(__name__)
absolute_path = os.path.split(os.path.abspath(__file__))[0]
class TelegramNotifier:
def __init__(self, chat_id, auth_key):
self._auth_key = auth_key
self._default_payload = {'auth_key': auth_key, 'chat_id': chat_id, 'parse_mode': 'MarkdownV2',
'disable_web_page_preview': True}
@staticmethod
def _markdown_escape(text):
text = str(text)
for special_char in ['\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '#', '+', '-', '.', '!', '=']:
text = text.replace(special_char, fr'\{special_char}')
return text
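    # Illustrative escaping (hypothetical input):
    #   _markdown_escape('1.5 ETH (new!)')  ->  '1\.5 ETH \(new\!\)'
    # so the text is accepted by Telegram's MarkdownV2 parser.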
def _generate_payload(self, message_variables, template_name):
payload = copy(self._default_payload)
template_path = os.path.join(absolute_path, 'templates')
loader = FileSystemLoader(template_path)
env = Environment(loader=loader)
template = env.get_template(template_name)
template_variables = {}
for key, value in message_variables.items():
template_variables[key] = self._markdown_escape(value)
text = template.render(**template_variables)
payload['text'] = text
return payload
def notify_block(self, pool, number, hash, reward, time, round_time, luck, reward_fiat=None):
message_variables = {'pool': pool, 'number': number, 'hash': hash, 'reward': reward, 'time': time,
'round_time': round_time, 'luck': luck, 'reward_fiat': reward_fiat}
payload = self._generate_payload(message_variables, 'block.md.j2')
self._send_message(payload)
def notify_balance(self, pool, address, url, balance, balance_percentage, balance_fiat=None):
message_variables = {'pool': pool, 'address': address, 'url': url, 'balance': balance,
'balance_percentage': balance_percentage, 'balance_fiat': balance_fiat}
payload = self._generate_payload(message_variables, 'balance.md.j2')
self._send_message(payload)
def notify_payment(self, pool, address, txid, amount, time, duration, amount_fiat=None):
message_variables = {'pool': pool, 'address': address, 'txid': txid, 'amount': amount,
'amount_fiat': amount_fiat, 'time': time, 'duration': duration}
payload = self._generate_payload(message_variables, 'payment.md.j2')
self._send_message(payload)
def _send_message(self, payload):
logger.debug(self._sanitize(payload))
r = requests.post(f'https://api.telegram.org/bot{self._auth_key}/sendMessage', json=payload)
r.raise_for_status()
@staticmethod
def _sanitize(payload):
payload = copy(payload)
if 'auth_key' in payload:
payload['auth_key'] = '***'
return payload
```
#### File: mining-companion/companion/utils.py
```python
def convert_weis(weis, precision=5):
return round(weis / 10**18, precision)
def format_weis(weis, precision=5):
amount = convert_weis(weis=weis, precision=precision)
if amount == int(amount):
amount = int(amount)
return f'{amount} ETH'
def convert_fiat(amount, exchange_rate, currency):
converted = round(convert_weis(amount)*exchange_rate, 2)
converted = f'{converted} {currency}'
return converted
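# A minimal sketch of the helpers above (illustrative numbers only):
#
#     convert_weis(1500000000000000000)               # 1.5
#     format_weis(1500000000000000000)                # '1.5 ETH'
#     convert_fiat(1500000000000000000, 2000, 'USD')  # '3000.0 USD'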
``` |
{
"source": "Joukahainen/ccxt",
"score": 3
} |
#### File: ccxt/test/test_position.py
```python
import numbers # noqa: E402
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
def test_position(exchange, position, symbol, now):
assert position
assert 'id' in position
assert position['id'] is None or isinstance(position['id'], basestring)
assert 'timestamp' in position
assert isinstance(position['timestamp'], numbers.Real)
assert position['timestamp'] > 1230940800000 # 03 Jan 2009 - first cryptocurrency block creation time
assert position['timestamp'] < now
assert 'datetime' in position
assert position['datetime'] == exchange.iso8601(position['timestamp'])
assert 'symbol' in position
assert symbol is None or position['symbol'] == symbol
assert 'info' in position
assert position['info']
``` |
{
"source": "Joukahainen/chartpy",
"score": 2
} |
#### File: chartpy/dashboard/layoutcanvas.py
```python
__author__ = 'saeedamen'
#
# Copyright 2021 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
from abc import ABC, abstractmethod
import pandas as pd
from chartpy.dashboard.sketchcomponents import SketchComponents
class LayoutCanvas(ABC):
"""Abstract class which is used to create a web dashboard, to specify the layout and also to write the callbacks
which react to user events (like clicking buttons and selecting from dropdown boxes).
"""
def __init__(self, app=None, constants=None, url_prefix=''):
self._app = app
self._sc = SketchComponents(app, constants=constants, url_prefix=url_prefix)
self._constants = constants
def flatten_list_of_strings(self, list_of_lists):
"""Flattens lists of strings, into a single list of strings (rather than characters, which is default behavior).
Parameters
----------
list_of_lists : str (list)
List to be flattened
Returns
-------
str (list)
"""
rt = []
for i in list_of_lists:
if isinstance(i, list):
rt.extend(self.flatten_list_of_strings(i))
else:
rt.append(i)
return rt
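    # Illustrative behavior: flatten_list_of_strings(['a', ['b', ['c']]])
    # returns ['a', 'b', 'c']; nested lists are flattened recursively while
    # whole strings are preserved instead of being split into characters.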
def convert_boolean(self, boolean_val):
boolean_val = boolean_val.lower()
if boolean_val == 'true' or boolean_val == 'yes':
return True
elif boolean_val == 'false' or boolean_val == 'no':
return False
def convert_date(self, date_val, date=True):
if date:
if date_val is None:
return date_val
            if isinstance(date_val, str):
                if ',' in date_val or ';' in date_val:
                    date_val = date_val.replace(';', ',').split(',')
                    return [pd.Timestamp(pd.Timestamp(d).date()) for d in date_val]
                return pd.Timestamp(pd.Timestamp(date_val).date())
elif isinstance(date_val, list):
return [pd.Timestamp(pd.Timestamp(d).date()) for d in date_val]
return date_val
def convert_float(self, float_val):
        if isinstance(float_val, (int, float)):
            return float(float_val)
if isinstance(float_val, list):
return [float(i) for i in float_val]
if ',' in float_val:
float_val = float_val.split(',')
return [float(i) for i in float_val]
return float(float_val)
def convert_int(self, int_val, listify=False):
if listify:
if not(isinstance(int_val, list)):
int_val = [int_val]
if isinstance(int_val, int):
return int_val
if isinstance(int_val, list):
return [int(i) for i in int_val]
if ',' in int_val:
int_val = int_val.split(',')
return [int(i) for i in int_val]
return int(int_val)
def convert_str(self, str_val, listify=False):
if str_val is None:
return None
if listify:
if not(isinstance(str_val, list)):
str_val = [str_val]
if isinstance(str_val, list):
return str_val
if ',' in str_val:
str_val = str_val.split(',')
return [x.strip() for x in str_val]
if ';' in str_val:
str_val = str_val.split(';')
return [x.strip() for x in str_val]
return str_val
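    # Illustrative behavior of the converters above (hypothetical inputs):
    #   convert_float('1.5,2.5')            -> [1.5, 2.5]
    #   convert_int('1,2,3')                -> [1, 2, 3]
    #   convert_str('a, b')                 -> ['a', 'b']
    #   convert_str('spot', listify=True)   -> ['spot']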
@abstractmethod
def calculate_button(self):
pass
@abstractmethod
def page_name(self):
pass
@abstractmethod
def attach_callbacks(self):
pass
@abstractmethod
def construct_layout(self):
pass
``` |
{
"source": "Joukahainen/findatapy",
"score": 2
} |
#### File: findatapy/util/tickerfactory.py
```python
__author__ = 'saeedamen'
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas
import numpy
import pandas as pd
from findatapy.util.loggermanager import LoggerManager
class TickerFactory(object):
"""Creating lists of tickers from a shortcuts and also from Excel files.
"""
def create_tickers_from_combinations(self, csv_file, out_csv_file):
# reader = csv.DictReader(open(csv_file))
if isinstance(csv_file, pd.DataFrame):
data_frame = csv_file
else:
data_frame = pandas.read_csv(csv_file, dtype=object)
rows = 100000
# category data_source freq ticker cut fields vendor_tickers
data_frame_out = pandas.DataFrame(index=numpy.arange(0, rows),
columns=('category', 'data_source', 'freq', 'tickers', 'cut', 'fields', 'vendor_tickers'))
i = 0
for category_source_freq_fields in data_frame['category.data_source.freq.fields']:
if isinstance(category_source_freq_fields, str):
spl = category_source_freq_fields.split('.')
category = spl[0]
data_source = spl[1]
freq = spl[2]
fields = spl[3]
for cut_postfix in data_frame['cut.postfix']:
if isinstance(cut_postfix, str):
spl1 = cut_postfix.split('.')
cut = spl1[0]
postfix = spl1[1]
for ticker, st in zip(data_frame['tickers'], data_frame['vendor_tickers']):
if isinstance(ticker, str):
if 'midfix' in data_frame.columns:
for midfix in data_frame['midfix']:
if isinstance(midfix, str):
for postmidfix in data_frame['postmidfix']:
if isinstance(postmidfix, str):
ticker_ext = ticker + midfix + postmidfix
vendor_tickers = st + midfix + postmidfix + ' ' + postfix
data_frame_out.loc[i] = [category, data_source, freq, ticker_ext,
cut, fields, vendor_tickers]
i = i + 1
else:
for postmidfix in data_frame['postmidfix']:
if isinstance(postmidfix, str):
ticker_ext = ticker + postmidfix
vendor_tickers = st + postmidfix + ' ' + postfix
data_frame_out.loc[i] = [category, data_source, freq, ticker_ext, cut, fields,
vendor_tickers]
i = i + 1
data_frame_out = data_frame_out[0:i]
if out_csv_file is not None:
data_frame_out.to_csv(out_csv_file)
return data_frame_out
# for line in reader:
# category = line["category"]
# data_source = line["data_source"]
# freq = line["freq"]
# ticker = line["ticker"]
# cut = line["cut"]
# vendor_tickers = line["vendor_tickers"]
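    # Illustrative input shape (hypothetical, inferred from the loop above):
    # the CSV is expected to contain the columns
    # 'category.data_source.freq.fields', 'cut.postfix', 'tickers' and
    # 'vendor_tickers' (plus optional 'midfix'/'postmidfix'); every
    # combination of their values is expanded into one output ticker row.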
def aggregate_ticker_excel(self, excel_file, out_csv, sheets=[], skiprows=None, cols=None):
df_list = []
logger = LoggerManager.getLogger(__name__)
for sh in sheets:
logger.info("Reading from " + sh + " in " + excel_file)
df = pd.read_excel(excel_file, sheet_name=sh, skiprows=skiprows)
df = df.dropna(how='all')
if 'maker' in sh:
df = self.create_tickers_from_combinations(df, None)
df_list.append(df)
df = pd.concat(df_list)
if cols is not None:
df = df[cols]
df = df.reset_index()
if isinstance(out_csv, list):
for out in out_csv:
logger.info("Writing to " + out)
df.to_csv(out)
else:
logger.info("Writing to " + out_csv)
df.to_csv(out_csv)
return df
``` |
{
"source": "Joukahainen/finmarketpy",
"score": 3
} |
#### File: finmarketpy/finmarketpy_examples/fx_options_indices_examples.py
```python
__author__ = 'saeedamen'
#
# Copyright 2020 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Shows how to use finmarketpy to create total return indices for FX vanilla options (uses FinancePy underneath), so we
can see the historical P&L from, for example, rolling a 1M call option.
Note, you will need to have a Bloomberg terminal (with blpapi Python library) to download the FX market data in order
to generate the FX option prices, which are used underneath (FX spot, FX forwards, FX implied volatility quotes and deposit rates)
"""
import pandas as pd
# For plotting
from chartpy import Chart, Style
# For loading market data
from findatapy.market import Market, MarketDataGenerator, MarketDataRequest
from findatapy.timeseries import Filter, Calculations, RetStats
from findatapy.util.loggermanager import LoggerManager
from finmarketpy.curve.fxoptionscurve import FXOptionsCurve
from finmarketpy.curve.volatility.fxvolsurface import FXVolSurface
from finmarketpy.curve.volatility.fxoptionspricer import FXOptionsPricer
logger = LoggerManager().getLogger(__name__)
chart = Chart(engine='plotly')
market = Market(market_data_generator=MarketDataGenerator())
# Choose run_example = 0 for everything
# run_example = 1 - create total return index AUDUSD 1M long calls (and separately long puts) over 2008 financial crisis and further
# run_example = 2 - create total return index USDJPY 1W short straddles over a long sample
run_example = 0
def prepare_indices(cross, df_option_tot=None, df_option_tc=None, df_spot_tot=None):
df_list = []
if df_option_tot is not None:
df_list.append(pd.DataFrame(df_option_tot[cross + '-option-tot.close']))
df_list.append(pd.DataFrame(df_option_tot[cross + '-option-delta-tot.close']))
df_list.append(pd.DataFrame(df_option_tot[cross + '-delta-pnl-index.close']))
if df_option_tc is not None:
df_list.append(pd.DataFrame(df_option_tc[cross + '-option-tot-with-tc.close']))
df_list.append(pd.DataFrame(df_option_tc[cross + '-option-delta-tot-with-tc.close']))
df_list.append(pd.DataFrame(df_option_tc[cross + '-delta-pnl-index-with-tc.close']))
if df_spot_tot is not None:
df_list.append(df_spot_tot)
df = calculations.join(df_list, how='outer').fillna(method='ffill')
return calculations.create_mult_index_from_prices(df)
###### Fetch market data for pricing AUDUSD options in 2007 (ie. FX spot, FX forwards, FX deposits and FX vol quotes)
###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation
###### Enters a long 1M call, and MTM every day, and at expiry rolls into another long 1M call
if run_example == 1 or run_example == 0:
# Warning make sure you choose dates, where there is full vol surface! If points are missing interpolation
# will fail
start_date = '01 Jan 2007'; finish_date = '31 Dec 2020' # Use smaller window for quicker execution
cross = 'AUDUSD'
fx_options_trading_tenor = '1M'
# Download all the market data for AUDUSD for pricing options (FX vol surface + spot + FX forwards + depos)
md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date,
data_source='bloomberg', cut='BGN', category='fx-vol-market',
tickers=cross, fx_vol_tenor=['1W', '1M', '3M'],
cache_algo='cache_algo_return', base_depos_currencies=[cross[0:3], cross[3:6]])
df_vol_market = market.fetch_market(md_request)
df_vol_market = df_vol_market.fillna(method='ffill')
# Remove New Year's Day and Christmas
df_vol_market = Filter().filter_time_series_by_holidays(df_vol_market, cal='FX')
# Get a total return index for trading spot
# This way we can take into account carry when calculating delta hedging P&L
md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date,
data_source='bloomberg', cut='NYC', category='fx-tot',
tickers=cross,
cache_algo='cache_algo_return')
df_tot = market.fetch_market(md_request)
df_vol_market = df_vol_market.join(df_tot, how='left')
df_vol_market = df_vol_market.fillna(method='ffill')
# We want to roll long 1M ATM call at expiry
# We'll mark to market the option price through the month by interpolating between 1W and 1M (and using whole vol curve
# at each tenor)
fx_options_curve = FXOptionsCurve(fx_options_trading_tenor=fx_options_trading_tenor,
roll_days_before=0,
roll_event='expiry-date',
roll_months=1,
fx_options_tenor_for_interpolation=['1W', '1M'],
strike='atm',
contract_type='european-call',
depo_tenor_for_option='1M',
position_multiplier=1.0,
tot_label='tot',
output_calculation_fields=True)
# Let's trade a long 1M call, and we roll at expiry
df_cuemacro_option_call_tot = fx_options_curve.construct_total_return_index(cross, df_vol_market)
# Add transaction costs to the option index (bid/ask bp for the option premium and spot FX)
df_cuemacro_option_call_tc = fx_options_curve.apply_tc_signals_to_total_return_index(cross, df_cuemacro_option_call_tot,
option_tc_bp=5, spot_tc_bp=2)
# Let's trade a long 1M OTM put, and we roll at expiry
df_cuemacro_option_put_tot = fx_options_curve.construct_total_return_index(
cross, df_vol_market, contract_type='european-put', strike='10d-otm', position_multiplier=1.0)
# Add transaction costs to the option index (bid/ask bp for the option premium and spot FX)
df_cuemacro_option_put_tc = fx_options_curve.apply_tc_signals_to_total_return_index(cross, df_cuemacro_option_put_tot,
option_tc_bp=5, spot_tc_bp=2)
# Get total returns for spot
df_bbg_tot = df_tot # from earlier!
df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns]
# Calculate a hedged portfolio of spot + 2*options (can we reduce drawdowns?)
calculations = Calculations()
ret_stats = RetStats()
df_hedged = calculations.join([df_bbg_tot[cross + '-tot.close-bbg'].to_frame(), df_cuemacro_option_put_tc[cross + '-option-tot-with-tc.close'].to_frame()], how='outer')
df_hedged = df_hedged.fillna(method='ffill')
df_hedged = df_hedged.pct_change()
df_hedged['Spot + 2*option put hedge'] = df_hedged[cross + '-tot.close-bbg'] + df_hedged[cross + '-option-tot-with-tc.close']
df_hedged.columns = RetStats(returns_df=df_hedged, ann_factor=252).summary()
# Plot everything
# P&L from call
chart.plot(calculations.create_mult_index_from_prices(
prepare_indices(cross=cross, df_option_tot=df_cuemacro_option_call_tot,
df_option_tc=df_cuemacro_option_call_tc, df_spot_tot=df_bbg_tot)))
# P&L from put option, put option + TC and total returns from spot
chart.plot(calculations.create_mult_index_from_prices(
prepare_indices(cross=cross,df_option_tot=df_cuemacro_option_put_tot,
df_option_tc=df_cuemacro_option_put_tc, df_spot_tot=df_bbg_tot)))
# P&L from put option + TC and total returns from spot
chart.plot(calculations.create_mult_index_from_prices(
prepare_indices(cross=cross, df_option_tc=df_cuemacro_option_put_tc, df_spot_tot=df_bbg_tot)))
# P&L for total returns from spot and total returns from + 2*put option + TC (ie. hedged portfolio)
chart.plot(calculations.create_mult_index(df_hedged))
# Plot delta from put option
chart.plot(df_cuemacro_option_put_tot[cross + '-delta.close'])
###### Fetch market data for pricing USDJPY options from 2006-2020 (ie. FX spot, FX forwards, FX deposits and FX vol quotes)
###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation
###### Enters a short 1W straddle, and MTM every day, and at expiry rolls into another 1W straddle
if run_example == 2 or run_example == 0:
# Warning make sure you choose dates, where there is full vol surface! If vol points in the tenors you are looking at
# are missing then interpolation will fail (or if eg. spot data is missing etc.)
start_date = '08 Mar 2007'; finish_date = '31 Dec 2020' # Monday
# start_date = '09 Mar 2007'; finish_date = '31 Dec 2014'
# start_date = '04 Jan 2006'; finish_date = '31 Dec 2008'
# start_date = '01 Jan 2007'; finish_date = '31 Dec 2007' # Use smaller window for quicker execution
cross = 'USDJPY'
fx_options_trading_tenor = '1W' # Try changing between 1W and 1M!
# Download all the market data for USDJPY for pricing options (FX vol surface + spot + FX forwards + depos)
md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date,
data_source='bloomberg', cut='10AM', category='fx-vol-market',
tickers=cross, fx_vol_tenor=['1W', '1M'], base_depos_tenor=['1W', '1M'],
cache_algo='cache_algo_return', base_depos_currencies=[cross[0:3], cross[3:6]])
df = market.fetch_market(md_request)
# Fill data for every workday and use weekend calendar (note: this is a bit of a fudge, filling down)
# CHECK DATA isn't missing at start of series
df = df.resample('B').last().fillna(method='ffill')
    df = df[df.index >= '09 Mar 2007'] # Try starting on a different day of the week & see how it impacts P&L
cal = 'WKD'
# Remove New Year's Day and Christmas
# df = Filter().filter_time_series_by_holidays(df, cal='FX')
# We want to roll a short 1W option at expiry
# If we select longer dates, it will mark to market the price through the month by interpolating between eg. 1W and 1M
# (and using whole vol curve at each tenor)
fx_options_curve = FXOptionsCurve(fx_options_trading_tenor=fx_options_trading_tenor,
roll_days_before=0,
roll_event='expiry-date',
roll_months=1, # This is ignored if we roll on expiry date
fx_options_tenor_for_interpolation=['1W', '1M'],
strike='atm',
contract_type='european-straddle',
position_multiplier=-1.0, # +1.0 for long options, -1.0 for short options
output_calculation_fields=True,
freeze_implied_vol=True,
cal=cal,
cum_index='mult')
# Let's trade a short straddle, and we roll at expiry
df_cuemacro_option_straddle_tot = fx_options_curve.construct_total_return_index(cross, df,
depo_tenor_for_option=fx_options_trading_tenor)
# Add transaction costs to the option index (bid/ask bp for the option premium and spot FX)
# Have wider spread for straddle (because adding call + put)
df_cuemacro_option_straddle_tc = fx_options_curve.apply_tc_signals_to_total_return_index(cross, df_cuemacro_option_straddle_tot,
option_tc_bp=10, spot_tc_bp=2)
# Get total returns for spot
md_request.abstract_curve = None
# Get Bloomberg calculated total return indices (for spot)
md_request.category = 'fx-tot'
md_request.cut = 'NYC'
df_bbg_tot = market.fetch_market(md_request)
df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns]
calculations = Calculations()
df_index = calculations.create_mult_index_from_prices(
prepare_indices(cross=cross, df_option_tc=df_cuemacro_option_straddle_tc, df_spot_tot=df_bbg_tot))
from finmarketpy.economics.quickchart import QuickChart
QuickChart(engine='plotly').plot_chart_with_ret_stats(df=df_index, plotly_plot_mode='offline_html', scale_factor=-1.5)
``` |
{
"source": "Joulco-Square/redis-dict",
"score": 3
} |
#### File: Joulco-Square/redis-dict/tests.py
```python
import unittest
import redis
from redis_dict import RedisDict
# !! Make sure you don't have keys named like this, they will be deleted.
TEST_NAMESPACE_PREFIX = 'test_rd'
redis_config = {
'host': 'localhost',
'port': 6379,
'db': 0,
}
class TestRedisDictBehaviorDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.redisdb = redis.StrictRedis(**redis_config)
cls.r = cls.create_redis_dict()
@classmethod
def tearDownClass(cls):
cls.clear_test_namespace()
@classmethod
def create_redis_dict(cls, namespace=TEST_NAMESPACE_PREFIX, **kwargs):
config = redis_config.copy()
config.update(kwargs)
return RedisDict(namespace=namespace, **config)
@classmethod
def clear_test_namespace(cls):
for key in cls.redisdb.scan_iter('{}:*'.format(TEST_NAMESPACE_PREFIX)):
cls.redisdb.delete(key)
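    # Note (illustrative): entries live in Redis under the namespace prefix,
    # e.g. the RedisDict key 'foobar' is stored as 'test_rd:foobar'.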
def setUp(self):
self.clear_test_namespace()
def test_python3_all_methods_from_dictionary_are_implemented(self):
import sys
if sys.version_info[0] == 3:
redis_dic = self.create_redis_dict()
dic = dict()
self.assertEqual(len(set(dir({})) - set(dir(RedisDict))), 0)
self.assertEqual(len(set(dir(dic)) - set(dir(redis_dic))), 0)
def test_input_items(self):
"""Calling RedisDict.keys() should return an empty list."""
redis_dic = self.create_redis_dict()
dic = dict()
expected = 0
self.assertEqual(expected, len(dic))
self.assertEqual(expected, len(redis_dic))
expected = 1
expected_key = 'one item'
expected_value = 1
redis_dic[expected_key] = expected_value
dic[expected_key] = expected_value
self.assertEqual(expected, len(dic))
self.assertEqual(expected, len(redis_dic))
self.assertEqual(dic[expected_key], redis_dic[expected_key])
        items = [('{} item'.format(i), i) for i in range(1, 5)]
for key, value in items:
redis_dic[key] = value
dic[key] = value
expected = 5
self.assertEqual(expected, len(dic))
self.assertEqual(expected, len(redis_dic))
for key, expected_value in items:
self.assertEqual(dic[key], expected_value)
self.assertEqual(dic[key], redis_dic[key])
def test_supported_types(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_values = (("int", 1), ("float", 0.9), ("str", "im a string"), ("bool", True), ("None", None))
for key, value in input_values:
redis_dic[key] = value
dic[key] = value
expected_len = len(input_values)
self.assertEqual(expected_len, len(redis_dic))
self.assertEqual(len(dic), len(redis_dic))
for expected_key, expected_value in input_values:
result = redis_dic[expected_key]
self.assertEqual(expected_value, result)
self.assertEqual(dic[expected_key], result)
self.assertTrue(len(redis_dic) > 2)
redis_dic.clear()
dic.clear()
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(dic), 0)
def test_update(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_values = {
"int": 1,
"float": 0.9,
"str": "im a string",
"bool": True,
"None": None,
}
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(dic), 0)
self.assertEqual(len(input_values), 5)
redis_dic.update(input_values)
dic.update(input_values)
self.assertEqual(len(redis_dic), 5)
self.assertEqual(len(dic), 5)
self.assertEqual(len(input_values), 5)
for expected_key, expected_value in input_values.items():
self.assertEqual(redis_dic[expected_key], expected_value)
self.assertEqual(dic[expected_key], expected_value)
def test_iter(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_items = {
"int": 1,
"float": 0.9,
"str": "im a string",
"bool": True,
"None": None,
}
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(dic), 0)
self.assertEqual(len(input_items), 5)
redis_dic.update(input_items)
dic.update(input_items)
self.assertEqual(len(redis_dic), 5)
self.assertEqual(len(dic), 5)
self.assertEqual(len(input_items), 5)
for expected_key, expected_value in input_items.items():
self.assertEqual(redis_dic[expected_key], expected_value)
self.assertEqual(dic[expected_key], expected_value)
for key in redis_dic:
self.assertTrue(key in input_items)
for key in redis_dic.iterkeys():
self.assertTrue(key in input_items)
for key in redis_dic.keys():
self.assertTrue(key in input_items)
for key, value in redis_dic.items():
self.assertEqual(input_items[key], value)
self.assertEqual(dic[key], value)
for key, value in redis_dic.iteritems():
self.assertEqual(input_items[key], value)
self.assertEqual(dic[key], value)
input_values = list(input_items.values())
dic_values = list(input_items.values())
result_values = list(redis_dic.itervalues())
self.assertEqual(sorted(map(str, input_values)), sorted(map(str, result_values)))
self.assertEqual(sorted(map(str, dic_values)), sorted(map(str, result_values)))
        result_values = list(redis_dic.values())
self.assertEqual(sorted(map(str, input_values)), sorted(map(str, result_values)))
self.assertEqual(sorted(map(str, dic_values)), sorted(map(str, result_values)))
def test_dict_method_update(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_items = {
"int": 1,
"float": 0.9,
"str": "im a string",
"bool": True,
"None": None,
}
redis_dic.update(input_items)
dic.update(input_items)
self.assertEqual(len(redis_dic), 5)
self.assertEqual(len(dic), 5)
self.assertEqual(len(input_items), 5)
def test_dict_method_pop(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_items = {
"int": 1,
"float": 0.9,
"str": "im a string",
"bool": True,
"None": None,
}
redis_dic.update(input_items)
dic.update(input_items)
self.assertEqual(len(redis_dic), 5)
self.assertEqual(len(dic), 5)
self.assertEqual(len(input_items), 5)
for i, key in enumerate(input_items.keys(), start=1):
expected = dic.pop(key)
result = redis_dic.pop(key)
self.assertEqual(expected, result)
self.assertEqual(len(dic), len(input_items)-i)
self.assertEqual(len(redis_dic), len(input_items)-i)
with self.assertRaises(KeyError):
dic.pop("item")
with self.assertRaises(KeyError):
redis_dic.pop("item")
def test_dict_method_pop_default(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_items = {
"int": 1,
"float": 0.9,
"str": "im a string",
"bool": True,
"None": None,
}
redis_dic.update(input_items)
dic.update(input_items)
self.assertEqual(len(redis_dic), 5)
self.assertEqual(len(dic), 5)
self.assertEqual(len(input_items), 5)
for i, key in enumerate(input_items.keys(), start=1):
expected = dic.pop(key)
result = redis_dic.pop(key)
self.assertEqual(expected, result)
self.assertEqual(len(dic), len(input_items)-i)
self.assertEqual(len(redis_dic), len(input_items)-i)
expected = "defualt item"
self.assertEqual(dic.pop("item", expected), expected)
self.assertEqual(redis_dic.pop("item", expected), expected)
expected = None
self.assertEqual(dic.pop("item", expected), expected)
self.assertEqual(redis_dic.pop("item", expected), expected)
def test_dict_method_popitem(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_items = {
"int": 1,
"float": 0.9,
"str": "im a string",
"bool": True,
"None": None,
}
redis_dic.update(input_items)
dic.update(input_items)
self.assertEqual(len(redis_dic), 5)
self.assertEqual(len(dic), 5)
self.assertEqual(len(input_items), 5)
expected = [dic.popitem() for _ in range(5)]
result = [redis_dic.popitem() for _ in range(5)]
self.assertEqual(sorted(map(str, expected)), sorted(map(str, result)))
self.assertEqual(len(dic), 0)
self.assertEqual(len(redis_dic), 0)
with self.assertRaises(KeyError):
dic.popitem()
with self.assertRaises(KeyError):
redis_dic.popitem()
def test_dict_method_setdefault(self):
redis_dic = self.create_redis_dict()
dic = dict()
dic.setdefault("item", 4)
redis_dic.setdefault("item", 4)
self.assertEqual(dic["item"], redis_dic["item"])
self.assertEqual(len(dic), 1)
self.assertEqual(len(redis_dic), 1)
dic.setdefault("item", 5)
redis_dic.setdefault("item", 5)
self.assertEqual(dic["item"], redis_dic["item"])
self.assertEqual(len(dic), 1)
self.assertEqual(len(redis_dic), 1)
dic.setdefault("foobar", 6)
redis_dic.setdefault("foobar", 6)
self.assertEqual(dic["item"], redis_dic["item"])
self.assertEqual(dic["foobar"], redis_dic["foobar"])
self.assertEqual(len(dic), 2)
self.assertEqual(len(redis_dic), 2)
def test_dict_method_get(self):
redis_dic = self.create_redis_dict()
dic = dict()
dic.setdefault("item", 4)
redis_dic.setdefault("item", 4)
self.assertEqual(dic["item"], redis_dic["item"])
self.assertEqual(len(dic), 1)
self.assertEqual(len(redis_dic), 1)
self.assertEqual(dic.get("item"), redis_dic.get("item"))
self.assertEqual(dic.get("foobar"), redis_dic.get("foobar"))
self.assertEqual(dic.get("foobar", "foobar"), redis_dic.get("foobar", "foobar"))
def test_dict_method_clear(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_items = {
"int": 1,
"float": 0.9,
"str": "im a string",
"bool": True,
"None": None,
}
redis_dic.update(input_items)
dic.update(input_items)
self.assertEqual(len(redis_dic), 5)
self.assertEqual(len(dic), 5)
dic.clear()
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(dic), 0)
# Boundary check. clear on empty dictionary is valid
dic.clear()
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(dic), 0)
    def test_dict_method_copy(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_items = {
"int": 1,
"float": 0.9,
"str": "im a string",
"bool": True,
"None": None,
}
redis_dic.update(input_items)
dic.update(input_items)
self.assertEqual(len(redis_dic), 5)
self.assertEqual(len(dic), 5)
dic_id = id(dic)
        redis_dic_id = id(redis_dic)
dic_copy = dic.copy()
redis_dic_copy = redis_dic.copy()
self.assertNotEqual(dic_id, id(dic_copy))
self.assertNotEqual(redis_dic_id, id(redis_dic_copy))
dic.clear()
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(dic), 0)
self.assertEqual(len(dic_copy), 5)
self.assertEqual(len(redis_dic_copy), 5)
def test_dict_exception_keyerror(self):
redis_dic = self.create_redis_dict()
dic = dict()
with self.assertRaisesRegex(KeyError, "appel"):
dic['appel']
with self.assertRaisesRegex(KeyError, "appel"):
redis_dic['appel']
with self.assertRaisesRegex(KeyError, "popitem\(\): dictionary is empty"):
dic.popitem()
with self.assertRaisesRegex(KeyError, "popitem\(\): dictionary is empty"):
redis_dic.popitem()
def test_dict_types_bool(self):
redis_dic = self.create_redis_dict()
dic = dict()
input_items = {
"True": True,
"False": False,
"NotTrue": False,
"NotFalse": True,
}
redis_dic.update(input_items)
dic.update(input_items)
self.assertEqual(len(redis_dic), len(input_items))
self.assertEqual(len(dic), len(input_items))
for key, expected_value in input_items.items():
self.assertEqual(redis_dic[key], expected_value)
self.assertEqual(dic[key], expected_value)
dic.clear()
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(dic), 0)
for k, v in input_items.items():
redis_dic[k] = v
dic[k] = v
for key, expected_value in input_items.items():
self.assertEqual(redis_dic[key], expected_value)
self.assertEqual(dic[key], expected_value)
for k, v in redis_dic.items():
self.assertEqual(input_items[k], v)
self.assertEqual(dic[k], v)
self.assertEqual(len(redis_dic), len(input_items))
self.assertEqual(len(dic), len(input_items))
def test_dict_method_pipeline(self):
redis_dic = self.create_redis_dict()
        expected = {
            'a': 1,
            'b': 2,
            'c': 3,
        }
with redis_dic.pipeline():
for k, v in expected.items():
redis_dic[k] = v
self.assertEqual(len(redis_dic), len(expected))
for k, v in expected.items():
self.assertEqual(redis_dic[k], v)
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
with redis_dic.pipeline():
with redis_dic.pipeline():
for k, v in expected.items():
redis_dic[k] = v
self.assertEqual(len(redis_dic), len(expected))
for k, v in expected.items():
self.assertEqual(redis_dic[k], v)
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
with redis_dic.pipeline():
with redis_dic.pipeline():
with redis_dic.pipeline():
for k, v in expected.items():
redis_dic[k] = v
self.assertEqual(len(redis_dic), len(expected))
for k, v in expected.items():
self.assertEqual(redis_dic[k], v)
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
with redis_dic.pipeline():
with redis_dic.pipeline():
with redis_dic.pipeline():
with redis_dic.pipeline():
for k, v in expected.items():
redis_dic[k] = v
self.assertEqual(len(redis_dic), len(expected))
for k, v in expected.items():
self.assertEqual(redis_dic[k], v)
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
def test_dict_method_pipeline_buffer_sets(self):
redis_dic = self.create_redis_dict()
        expected = {
            'a': 1,
            'b': 2,
            'c': 3,
        }
with redis_dic.pipeline():
for k, v in expected.items():
redis_dic[k] = v
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(redis_dic), len(expected))
for k, v in expected.items():
self.assertEqual(redis_dic[k], v)
with redis_dic.pipeline():
for k, v in redis_dic.items():
redis_dic[k] = v*2
for k, v in expected.items():
self.assertEqual(redis_dic[k], v)
for k, v in expected.items():
self.assertEqual(redis_dic[k], v*2)
self.assertEqual(len(redis_dic), len(expected))
redis_dic.clear()
self.assertEqual(len(redis_dic), 0)
with redis_dic.pipeline():
with redis_dic.pipeline():
for k, v in expected.items():
redis_dic[k] = v
self.assertEqual(len(redis_dic), 0)
self.assertEqual(len(redis_dic), len(expected))
for k, v in expected.items():
self.assertEqual(redis_dic[k], v)
with redis_dic.pipeline():
with redis_dic.pipeline():
for k, v in redis_dic.items():
redis_dic[k] = v * 2
for k, v in expected.items():
self.assertEqual(redis_dic[k], v)
for k, v in expected.items():
self.assertEqual(redis_dic[k], v * 2)
self.assertEqual(len(redis_dic), len(expected))
with redis_dic.pipeline():
redis_dic.clear()
self.assertEqual(len(redis_dic), len(expected))
self.assertEqual(len(redis_dic), 0)
def test_dict_method_fromkeys(self):
redis_dic = self.create_redis_dict()
dic = dict()
keys = ['a', 'b', 'c', 'd']
expected_dic = {k: None for k in keys}
result_dic = dic.fromkeys(keys)
result_redis_dic = redis_dic.fromkeys(keys)
self.assertEqual(len(result_dic), len(keys))
self.assertEqual(len(result_redis_dic), len(keys))
self.assertEqual(len(expected_dic), len(keys))
for k, v in expected_dic.items():
self.assertEqual(result_redis_dic[k], v)
self.assertEqual(result_dic[k], v)
def test_dict_method_fromkeys_with_default(self):
redis_dic = self.create_redis_dict()
dic = dict()
expected_default = 42
keys = ['a', 'b', 'c', 'd']
expected_dic = {k: expected_default for k in keys}
result_dic = dic.fromkeys(keys, expected_default)
result_redis_dic = redis_dic.fromkeys(keys, expected_default)
self.assertEqual(len(result_dic), len(keys))
self.assertEqual(len(result_redis_dic), len(keys))
self.assertEqual(len(expected_dic), len(keys))
for k, v in expected_dic.items():
self.assertEqual(result_redis_dic[k], v)
class TestRedisDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.redisdb = redis.StrictRedis(**redis_config)
cls.r = cls.create_redis_dict()
@classmethod
def tearDownClass(cls):
cls.clear_test_namespace()
@classmethod
def create_redis_dict(cls, namespace=TEST_NAMESPACE_PREFIX, **kwargs):
config = redis_config.copy()
config.update(kwargs)
return RedisDict(namespace=namespace, **config)
@classmethod
def clear_test_namespace(cls):
for key in cls.redisdb.scan_iter('{}:*'.format(TEST_NAMESPACE_PREFIX)):
cls.redisdb.delete(key)
def setUp(self):
self.clear_test_namespace()
def test_keys_empty(self):
"""Calling RedisDict.keys() should return an empty list."""
keys = self.r.keys()
self.assertEqual(keys, [])
def test_set_and_get(self):
"""Test setting a key and retrieving it."""
self.r['foobar'] = 'barbar'
self.assertEqual(self.r['foobar'], 'barbar')
def test_set_none_and_get_none(self):
"""Test setting a key with no value and retrieving it."""
self.r['foobar'] = None
self.assertIsNone(self.r['foobar'])
def test_set_and_get_multiple(self):
"""Test setting two different keys with two different values, and reading them."""
self.r['foobar1'] = 'barbar1'
self.r['foobar2'] = 'barbar2'
self.assertEqual(self.r['foobar1'], 'barbar1')
self.assertEqual(self.r['foobar2'], 'barbar2')
def test_get_nonexisting(self):
"""Test that retrieving a non-existing key raises a KeyError."""
with self.assertRaises(KeyError):
_ = self.r['nonexistingkey']
def test_delete(self):
"""Test deleting a key."""
self.r['foobargone'] = 'bars'
del self.r['foobargone']
self.assertEqual(self.redisdb.get('foobargone'), None)
def test_contains_empty(self):
"""Tests the __contains__ function with no keys set."""
self.assertFalse('foobar' in self.r)
def test_contains_nonempty(self):
"""Tests the __contains__ function with keys set."""
self.r['foobar'] = 'barbar'
self.assertTrue('foobar' in self.r)
def test_repr_empty(self):
"""Tests the __repr__ function with no keys set."""
expected_repr = str({})
actual_repr = repr(self.r)
self.assertEqual(actual_repr, expected_repr)
def test_repr_nonempty(self):
"""Tests the __repr__ function with keys set."""
self.r['foobars'] = 'barrbars'
expected_repr = str({'foobars': 'barrbars'})
actual_repr = repr(self.r)
self.assertEqual(actual_repr, expected_repr)
def test_str_nonempty(self):
"""Tests the __repr__ function with keys set."""
self.r['foobars'] = 'barrbars'
expected_str = str({'foobars': 'barrbars'})
actual_str = str(self.r)
self.assertEqual(actual_str, expected_str)
def test_len_empty(self):
"""Tests the __repr__ function with no keys set."""
self.assertEqual(len(self.r), 0)
def test_len_nonempty(self):
"""Tests the __repr__ function with keys set."""
self.r['foobar1'] = 'barbar1'
self.r['foobar2'] = 'barbar2'
self.assertEqual(len(self.r), 2)
def test_to_dict_empty(self):
"""Tests the to_dict function with no keys set."""
expected_dict = {}
actual_dict = self.r.to_dict()
self.assertEqual(actual_dict, expected_dict)
def test_to_dict_nonempty(self):
"""Tests the to_dict function with keys set."""
self.r['foobar'] = 'barbaros'
expected_dict = {u'foobar': u'barbaros'}
actual_dict = self.r.to_dict()
self.assertEqual(actual_dict, expected_dict)
def test_chain_set_1(self):
"""Test setting a chain with 1 element."""
self.r.chain_set(['foo'], 'melons')
expected_key = '{}:foo'.format(TEST_NAMESPACE_PREFIX)
self.assertEqual(self.redisdb.get(expected_key), b'str:melons')
def test_chain_set_2(self):
"""Test setting a chain with 2 elements."""
self.r.chain_set(['foo', 'bar'], 'melons')
expected_key = '{}:foo:bar'.format(TEST_NAMESPACE_PREFIX)
self.assertEqual(self.redisdb.get(expected_key), b'str:melons')
def test_chain_set_overwrite(self):
"""Test setting a chain with 1 element and then overwriting it."""
self.r.chain_set(['foo'], 'melons')
self.r.chain_set(['foo'], 'bananas')
expected_key = '{}:foo'.format(TEST_NAMESPACE_PREFIX)
self.assertEqual(self.redisdb.get(expected_key), b'str:bananas')
def test_chain_get_1(self):
"""Test setting and getting a chain with 1 element."""
self.r.chain_set(['foo'], 'melons')
self.assertEqual(self.r.chain_get(['foo']), 'melons')
def test_chain_get_empty(self):
"""Test getting a chain that has not been set."""
with self.assertRaises(KeyError):
_ = self.r.chain_get(['foo'])
def test_chain_get_2(self):
"""Test setting and getting a chain with 2 elements."""
self.r.chain_set(['foo', 'bar'], 'melons')
self.assertEqual(self.r.chain_get(['foo', 'bar']), 'melons')
def test_chain_del_1(self):
"""Test setting and deleting a chain with 1 element."""
self.r.chain_set(['foo'], 'melons')
self.r.chain_del(['foo'])
with self.assertRaises(KeyError):
_ = self.r.chain_get(['foo'])
def test_chain_del_2(self):
"""Test setting and deleting a chain with 2 elements."""
self.r.chain_set(['foo', 'bar'], 'melons')
self.r.chain_del(['foo', 'bar'])
with self.assertRaises(KeyError):
_ = self.r.chain_get(['foo', 'bar'])
def test_expire_context(self):
"""Test adding keys with an expire value by using the contextmanager."""
with self.r.expire_at(3600):
self.r['foobar'] = 'barbar'
actual_ttl = self.redisdb.ttl('{}:foobar'.format(TEST_NAMESPACE_PREFIX))
self.assertAlmostEqual(3600, actual_ttl, delta=2)
def test_expire_keyword(self):
"""Test ading keys with an expire value by using the expire config keyword."""
r = self.create_redis_dict(expire=3600)
r['foobar'] = 'barbar'
actual_ttl = self.redisdb.ttl('{}:foobar'.format(TEST_NAMESPACE_PREFIX))
self.assertAlmostEqual(3600, actual_ttl, delta=2)
def test_iter(self):
"""Tests the __iter__ function."""
key_values = {
'foobar1': 'barbar1',
'foobar2': 'barbar2',
}
for key, value in key_values.items():
self.r[key] = value
# TODO made the assumption that iterating the redisdict should return keys, like a normal dict
for key in self.r:
self.assertEqual(self.r[key], key_values[key])
# TODO behavior of multi and chain methods should be discussed.
# TODO python 2 couldn't skip
#def test_multi_get_with_key_none(self):
# """Tests that multi_get with key None raises TypeError."""
# with self.assertRaises(TypeError):
# self.r.multi_get(None)
def test_multi_get_empty(self):
"""Tests the multi_get function with no keys set."""
self.assertEqual(self.r.multi_get('foo'), [])
def test_multi_get_nonempty(self):
"""Tests the multi_get function with 3 keys set, get 2 of them."""
self.r['foobar'] = 'barbar'
self.r['foobaz'] = 'bazbaz'
self.r['goobar'] = 'borbor'
expected_result = ['barbar', 'bazbaz']
self.assertEqual(sorted(self.r.multi_get('foo')), sorted(expected_result))
def test_multi_get_chain_with_key_none(self):
"""Tests that multi_chain_get with key None raises TypeError."""
with self.assertRaises(TypeError):
self.r.multi_chain_get(None)
def test_multi_chain_get_empty(self):
"""Tests the multi_chain_get function with no keys set."""
self.assertEqual(self.r.multi_chain_get(['foo']), [])
def test_multi_chain_get_nonempty(self):
"""Tests the multi_chain_get function with keys set."""
self.r.chain_set(['foo', 'bar', 'bar'], 'barbar')
self.r.chain_set(['foo', 'bar', 'baz'], 'bazbaz')
self.r.chain_set(['foo', 'baz'], 'borbor')
# redis.mget seems to sort keys in reverse order here
expected_result = sorted([u'bazbaz', u'barbar'])
self.assertEqual(sorted(self.r.multi_chain_get(['foo', 'bar'])), expected_result)
def test_multi_dict_empty(self):
"""Tests the multi_dict function with no keys set."""
self.assertEqual(self.r.multi_dict('foo'), {})
def test_multi_dict_one_key(self):
"""Tests the multi_dict function with 1 key set."""
self.r['foobar'] = 'barbar'
expected_dict = {u'foobar': u'barbar'}
self.assertEqual(self.r.multi_dict('foo'), expected_dict)
def test_multi_dict_two_keys(self):
"""Tests the multi_dict function with 2 keys set."""
self.r['foobar'] = 'barbar'
self.r['foobaz'] = 'bazbaz'
expected_dict = {u'foobar': u'barbar', u'foobaz': u'bazbaz'}
self.assertEqual(self.r.multi_dict('foo'), expected_dict)
def test_multi_dict_complex(self):
"""Tests the multi_dict function by setting 3 keys and matching 2."""
self.r['foobar'] = 'barbar'
self.r['foobaz'] = 'bazbaz'
self.r['goobar'] = 'borbor'
expected_dict = {u'foobar': u'barbar', u'foobaz': u'bazbaz'}
self.assertEqual(self.r.multi_dict('foo'), expected_dict)
def test_multi_del_empty(self):
"""Tests the multi_del function with no keys set."""
self.assertEqual(self.r.multi_del('foobar'), 0)
def test_multi_del_one_key(self):
"""Tests the multi_del function with 1 key set."""
self.r['foobar'] = 'barbar'
self.assertEqual(self.r.multi_del('foobar'), 1)
self.assertIsNone(self.redisdb.get('foobar'))
def test_multi_del_two_keys(self):
"""Tests the multi_del function with 2 keys set."""
self.r['foobar'] = 'barbar'
self.r['foobaz'] = 'bazbaz'
self.assertEqual(self.r.multi_del('foo'), 2)
self.assertIsNone(self.redisdb.get('foobar'))
self.assertIsNone(self.redisdb.get('foobaz'))
def test_multi_del_complex(self):
"""Tests the multi_del function by setting 3 keys and deleting 2."""
self.r['foobar'] = 'barbar'
self.r['foobaz'] = 'bazbaz'
self.r['goobar'] = 'borbor'
self.assertEqual(self.r.multi_del('foo'), 2)
self.assertIsNone(self.redisdb.get('foobar'))
self.assertIsNone(self.redisdb.get('foobaz'))
self.assertEqual(self.r['goobar'], 'borbor')
if __name__ == '__main__':
import sys
if sys.version_info[0] == 2:
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
unittest.main()
``` |
{
"source": "Joule4247532/CS-Army-World-Destroying-Discord-Bot",
"score": 3
} |
#### File: plugin/starboard/__main__.py
```python
import os, sys
import typing
import discord
import json
class StarboardPlugin:
"""Starboard plugin"""
starboard_channel_id = ""
configs = {"min_stars" :[1], "accepted_emojis":["star", "star2"] }
async def setStarboard(message, discord_client: object) -> str:
StarboardPlugin.starboard_channel_id = message.channel.id
await discord_client.send_message(message.channel, "Starboard was set to this channel")
async def on_reaction_add(reaction, user, discord_client: object):
await discord_client.send_message(reaction.message.channel, reaction.emoji)
if reaction.emoji in StarboardPlugin.configs["accepted_emojis"]:
reactionCnt = 0
# get all votes
temp = reaction.message.reactions
for r in temp:
if r.emoji.name in StarboardPlugin.configs["accepted_emojis"]:
reactionCnt += r.count
if reactionCnt >= StarboardPlugin.configs["min_stars"]:
                await StarboardPlugin.postToBoard(reaction.message, discord_client)
print(reaction.message)
async def postToBoard(message, discord_client: object):
        if StarboardPlugin.starboard_channel_id != "":
            await discord_client.send_message(
                discord_client.get_channel(StarboardPlugin.starboard_channel_id),
                message.content
            )
```
#### File: plugin/test_plugin/__main__.py
```python
import os, sys
import typing
import discord
class TestPlugin:
async def ping_func(message, discord_client: object) -> str:
await discord_client.send_message(message.channel, "WE WUZ KINGS N SHIET")
async def echo_func( message, discord_client: object) -> str:
await discord_client.send_message(message.channel, message.content.split("!echo ")[-1])
async def echoto_func(message, discord_client: object) -> str:
temp = message.content.split()
await discord_client.send_message(discord.Object(id=temp[1]), temp[2])
``` |
{
"source": "joulebit/Projects-for-learning",
"score": 4
} |
#### File: Projects-for-learning/Calculator/operators.py
```python
import numbers
class Operator:
"""class for operators"""
def __init__(self, operator, strength):
self.operator = operator
self.strength = strength
def execute(self, element1, element2, debug=True):
"""execute given operator on two elements"""
if (not isinstance(element1, numbers.Number)) or (not isinstance(element2, numbers.Number)):
raise TypeError("Cannot execute func if element is not a number")
result = self.operator(element1, element2)
if debug is True:
print("Function: " + self.operator.__name__
+ "({:f},{:f}) = {:f}".format(element1, element2, result))
return result
    def __gt__(self, other):
        """Compare the strength of two operators."""
        return self.strength > other.strength
def get_operator(self):
"""return the operator of the object"""
return self.operator
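# A minimal usage sketch (numpy supplies the underlying binary functions):
#
#     import numpy as np
#     add = Operator(np.add, strength=0)
#     multiply = Operator(np.multiply, strength=1)
#     add.execute(2, 3, debug=False)  # 5
#     multiply > add                  # True: '*' binds tighter than '+'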
```
#### File: Projects-for-learning/Calculator/queues.py
```python
from container import Container
class Queue(Container):
"""queue class for storing items"""
def __init__(self):
Container.__init__(self)
def peek(self):
"""peek at the first item in the list"""
if not self.is_empty():
return self.items[0]
return None
def pop(self):
"""pop the first item"""
if not self.is_empty():
return self.items.pop(0)
return None
```
#### File: Projects-for-learning/Calculator/rpn.py
```python
from stack import Stack
from functions import Function
from operators import Operator
from queues import Queue
class RPN_solver:
"""solve a given RPN list"""
def __init__(self):
self.tall = Stack()
def solve(self, elements):
"""solve the given RPN sequence"""
for element in elements:
if isinstance(element, (float, int)):
self.tall.push(element)
elif isinstance(element, Function):
last_element = self.tall.pop()
new_element = element.execute(last_element)
self.tall.push(new_element)
elif isinstance(element, Operator):
tall2 = self.tall.pop()
tall1 = self.tall.pop()
new_element = element.execute(tall1, tall2)
self.tall.push(new_element)
return self.tall.peek()
class RPN_converter:
"""convert a list of numbers and operations and functions to RPN format"""
def __init__(self):
self.operators = Stack()
self.output = Queue()
self.out = []
def convert(self, elements):
"""convert given list to RPN format"""
for element in elements:
if isinstance(element, (float, int)):
self.output.push(element)
elif isinstance(element, Function):
self.operators.push(element)
elif element == "(":
self.operators.push(element)
elif element == ")":
while self.operators.peek() != "(":
self.output.push(self.operators.pop())
self.operators.pop()
elif isinstance(element, Operator):
while self.operators.peek() is not None and self.operators.peek() != "(":
if isinstance(self.operators.peek(), Operator) and \
self.operators.peek() > element:
self.output.push(self.operators.pop())
else:
break
self.operators.push(element)
while self.operators.peek() is not None:
self.output.push(self.operators.pop())
return self.output.items
```
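`RPN_converter.convert` is a compact shunting-yard implementation: numbers go straight to the output queue, operators wait on a stack until a stronger operator or a closing parenthesis flushes them, and `RPN_solver.solve` then evaluates the postfix sequence with a single stack. A short round-trip sketch, assuming the sibling modules (`stack`, `queues`, `operators`) resolve as in the file above:
```python
import numpy as np
from operators import Operator
from rpn import RPN_converter, RPN_solver

plus = Operator(np.add, 0)
times = Operator(np.multiply, 1)

# (1 + 2) * 3 as an infix token list
tokens = ["(", 1, plus, 2, ")", times, 3]
rpn = RPN_converter().convert(tokens)  # -> [1, 2, plus, 3, times]
print(RPN_solver().solve(rpn))         # 9
```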
#### File: Projects-for-learning/Calculator/unit_test.py
```python
import unittest
import numpy as np
from calculator import Calculator, Text_parser
from queues import Queue
from stack import Stack
from rpn import RPN_solver, RPN_converter
from operators import Operator
from functions import Function
class TestCalculator(unittest.TestCase):
def test_calc_operators(self):
c = Calculator()
value = c.calculate_expression("47.03pluss-3")
self.assertEqual(value, 44.03)
value = c.calculate_expression("5gange6")
self.assertEqual(value, 30)
value = c.calculate_expression("-5minus-9")
self.assertEqual(value, 4)
value = c.calculate_expression("15dele5")
self.assertEqual(value, 3)
def test_calc_functions(self):
c = Calculator()
value = c.calculate_expression("sqrt2")
self.assertAlmostEqual(value, np.sqrt(2))
value = c.calculate_expression("exp11")
self.assertAlmostEqual(value, np.exp(11))
value = c.calculate_expression("log14")
self.assertAlmostEqual(value, np.log(14))
value = c.calculate_expression("sin7")
self.assertAlmostEqual(value, np.sin(7))
value = c.calculate_expression("cos52")
self.assertAlmostEqual(value, np.cos(52))
def test_operator_strength(self):
c = Calculator()
value = c.calculate_expression("3 pluss 5 gange 2")
self.assertEqual(value, 13)
value = c.calculate_expression("2 gange3 pluss 5 gange 2")
self.assertEqual(value, 16)
def test_calc_nested_parentheses(self):
c = Calculator()
value = c.calculate_expression("(2 pluss 5)gange 7")
self.assertEqual(value, 49)
value = c.calculate_expression("exp((3 minus 2)pluss(8 dele (1pluss1)))")
self.assertEqual(value, np.exp(5))
value = c.calculate_expression("4 minus(3 minus -1)")
self.assertEqual(value, 0)
def test_queues(self):
q = Queue()
for i in range(1,100):
q.push(i)
self.assertEqual(99,q.size())
self.assertEqual(1,q.peek())
self.assertEqual(1,q.pop())
def test_stacks(self):
q = Stack()
for i in range(1,100):
q.push(i)
self.assertEqual(99,q.size())
self.assertEqual(99,q.peek())
self.assertEqual(99,q.pop())
def test_RPN_solve(self):
r = RPN_solver()
value = r.solve([1, 2, 3, Operator(np.multiply,1), Operator(np.add,0), Function(np.exp)])
self.assertEqual(value, np.exp(7))
def test_RPN_convert(self):
t = RPN_converter().convert([5, Operator(np.multiply,1), "(",1,Operator(np.add,0),2,")"])
self.assertEqual(t[0], 5)
self.assertEqual(t[1], 1)
self.assertEqual(t[2], 2)
self.assertTrue(isinstance(t[3], Operator))
self.assertTrue(isinstance(t[4], Operator))
def test_text_parser(self):
parser = Text_parser()
parsed = parser.translate("(1pluss2)minus3gange4exp5")
print(parsed)
self.assertTrue(parsed)
```
#### File: Projects-for-learning/Cryptography/caesar.py
```python
from cipher import Cipher
import random
class Caesar(Cipher):
"""push the symbols key steps to the right"""
def encode(self, text, key):
encoded = ''
for letter in text:
encoded += self.dictionary.get((ord(letter)-32+key)%95)
return encoded
def decode(self, coded_text, key):
decoded = ''
for letter in coded_text:
decoded += self.dictionary.get((ord(letter)-32-key)%95)
return decoded
def generate_key(self):
return random.randint(1,94)
"""forbedre verify funksjonen"""
```
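The cipher operates on the 95 printable ASCII characters (codes 32-126), shifting by the key modulo 95. The `Cipher` base class (and its `dictionary` attribute) is not shown here, so the following is a standalone sketch of the same arithmetic, with the dictionary lookup replaced by direct `chr` calls (an assumption about what the base class provides):
```python
def caesar_encode(text, key):
    # shift each printable ASCII character (codes 32..126) by key, wrapping mod 95
    return ''.join(chr((ord(c) - 32 + key) % 95 + 32) for c in text)

def caesar_decode(coded_text, key):
    # undo the shift
    return ''.join(chr((ord(c) - 32 - key) % 95 + 32) for c in coded_text)

assert caesar_decode(caesar_encode("Hello, world!", 42), 42) == "Hello, world!"
```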
#### File: Projects-for-learning/Cryptography/hacker.py
```python
from sender_reciever import Reciever, Sender
from affine import Affine
from ubrytelig import Ubrytelig
class Hacker(Reciever):
"""Brute force hacker!
Denne versjonen trenger ikke å få det brukte cipheret som input,
hvis cipheret er det Ubrytelige,
tar dekrypteringen bare 52 sekunder lengre"""
def __init__(self):
Reciever.__init__(self)
_f = open('english_words.txt', "r")
self.valid_words = set(_f.read().split())
self.valid_inverse = [1, 2, 3, 4, 6, 7, 8, 9, 11,\
12, 13, 14, 16, 17, 18, 21,
22, 23, 24, 26, 27, 28, 29,
31, 32, 33, 34, 36, 37, 39,
41, 42, 43, 44, 46, 47, 48,
49, 51, 52, 53, 54, 56, 58,
59, 61, 62, 63, 64, 66, 67,
68, 69, 71, 72, 73, 74, 77,
78, 79, 81, 82, 83, 84, 86,
87, 88, 89, 91, 92, 93, 94]
_f.close()
def decode(self, text):
guessed_word = ''
words_recognized_record = 0
dummy = Reciever()
dummy.set_cipher(Affine())
"""Tester for Affine cipher, siden det også plukker opp alle
multiplikasjon og alle Caesar"""
for multiply in self.valid_inverse:
for add in range(95):
dummy.set_key((multiply, add))
dummy.decode(text)
decoded_text = dummy.get_decoded().lower().split()
matching_words = set(decoded_text).intersection(self.valid_words)
if len(matching_words) > words_recognized_record:
words_recognized_record = len(matching_words)
guessed_word = " ".join(decoded_text)
"""Hvis vi kjenner igjen minst tre ord, så kan vi stoppe
searchet tidlig og spare opp til ca 50 sekunder"""
if words_recognized_record >= 3:
return guessed_word
"""Ellers må vi annta at det "ubrytelige" cipheret ble brukt"""
"""10 min å hacke med kodeord "pizza"...
og 30 sekund å hacke med kodeord "ant"... """
dummy.set_cipher(Ubrytelig())
count = 1
for kodeord in self.valid_words:
dummy.set_key(kodeord)
dummy.decode(text)
decoded_text = dummy.get_decoded().lower().split()
# To visualize how far the decryption has progressed in phase 2
print(decoded_text, count)
count += 1
# This operation is very expensive
matching_words = set(decoded_text).intersection(self.valid_words)
if len(matching_words) > words_recognized_record:
words_recognized_record = len(matching_words)
guessed_word = " ".join(decoded_text)
if words_recognized_record >= 5:
print("Kodeordet er", kodeord)
return guessed_word
else:
return "Could not brute force the message"
A = Sender()
#A.set_key((2,0))
#A.set_cipher(Affine())
A.set_key("pizza")
A.set_cipher(Ubrytelig())
A.encode("To recieve full marks you have to solve all parts")
#Print how the encrypted text looks like
print(A.get_encoded())
H = Hacker()
print(H.decode(A.get_encoded()))
```
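Both brute-force phases rely on the same scoring heuristic: decode with a candidate key and count how many of the resulting tokens are real English words. A tiny self-contained sketch of that score, using a hypothetical miniature word set:
```python
valid_words = {"the", "quick", "brown", "fox"}  # hypothetical miniature dictionary

decoded = "thz quick brmwn fox".lower().split()
score = len(set(decoded) & valid_words)
print(score)  # 2: only "quick" and "fox" are recognized
```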
#### File: Projects-for-learning/Rock Paper Scissors/TilfeldigSpiller.py
```python
import random as rn
from Spiller import Spiller
class Tilfeldig(Spiller):
"""Tilfeldig spiller"""
def __init__(self, name):
super().__init__(name)
def velg_aksjon(self):
return rn.randint(0, 2)
``` |
{
"source": "JouleCai/GeoSpaceLab",
"score": 2
} |
#### File: geospacelab/datahub/_base_metadata.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import geospacelab.toolbox.utilities.pyclass as pyclass
class StrAttrBase(str):
def __new__(cls, str_in):
if issubclass(str_in.__class__, StrAttrBase):
obj = str_in
else:
obj = str.__new__(cls, str_in)
return obj
def config(self, logging=True, **kwargs):
pyclass.set_object_attributes(self, append=False, logging=logging, **kwargs)
class DatabaseModel(StrAttrBase):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in)
obj.category = kwargs.pop('category', '')
obj.url = kwargs.pop('url', None)
obj.description = kwargs.pop('description', '')
obj.notes = kwargs.pop('notes', '')
return obj
class FacilityModel(StrAttrBase):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in)
obj.category = kwargs.pop('category', '')
obj.url = kwargs.pop('url', None)
obj.description = kwargs.pop('description', '')
obj.notes = kwargs.pop('notes', '')
return obj
class ProductModel(StrAttrBase):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in)
obj.category = kwargs.pop('category', '')
obj.url = kwargs.pop('url', None)
obj.description = kwargs.pop('description', '')
obj.notes = kwargs.pop('notes', '')
return obj
class InstrumentModel(StrAttrBase):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in)
obj.category = kwargs.pop('category', '')
obj.description = kwargs.pop('description', '')
obj.notes = kwargs.pop('notes', '')
return obj
class SiteModel(StrAttrBase):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in)
obj.category = kwargs.pop('category', '')
obj.name = kwargs.pop('name', '')
obj.code = str_in
obj.location = kwargs.pop('location', {'GEO_LAT': None, 'GEO_LON': None, 'GEO_ALT': None})
obj.description = kwargs.pop('description', '')
obj.notes = kwargs.pop('notes', '')
return obj
class MetadataModel(object):
def __init__(self):
self.database = ''
@property
def database(self):
return self._database
@database.setter
def database(self, value):
self._database = value
def config(self, logging=True, **kwargs):
pyclass.set_object_attributes(self, append=False, logging=logging, **kwargs)
```
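Because each model subclasses `str`, an instance behaves like a plain string (hashable, comparable, printable) while carrying extra metadata attributes. A short sketch for `SiteModel`, with illustrative coordinates that are assumptions for the example:
```python
from geospacelab.datahub._base_metadata import SiteModel

site = SiteModel('TRO', name='Tromsø UHF',
                 location={'GEO_LAT': 69.58, 'GEO_LON': 19.23, 'GEO_ALT': 0.})  # example values

print(site == 'TRO')  # True: it still compares as the string 'TRO'
print(site.name)      # 'Tromsø UHF'
print(site.location)  # the location dict passed above
```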
#### File: madrigal/dmsp/downloader.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import datetime
from dateutil.relativedelta import relativedelta
from dateutil.parser import parse as dtparse
import numpy as np
import re
import requests
import bs4
import os
import pathlib
from geospacelab import preferences as pfr
import geospacelab.datahub.sources.madrigal as madrigal
import geospacelab.toolbox.utilities.pylogging as mylog
import geospacelab.toolbox.utilities.pydatetime as dttool
def test():
dt_fr = datetime.datetime(2014, 3, 10)
dt_to = datetime.datetime(2014, 3, 10)
download_obj = Downloader(dt_fr, dt_to, sat_id='f16', file_type='s1')
data_type_dict = {
's1': 'ion drift',
's4': 'plasma temp',
'e': 'flux/energy',
'ssies2': 'SSIES-2',
'hp': 'Hemispherical power'
}
class Downloader(object):
"""Download the GNSS TEC data
"""
def __init__(self, dt_fr, dt_to, sat_id=None, file_type=None, data_file_root_dir=None,
user_fullname=madrigal.default_user_fullname,
user_email=madrigal.default_user_email,
user_affiliation=madrigal.default_user_affiliation):
self.user_fullname = user_fullname
self.user_email = user_email
self.user_affiliation = user_affiliation
self.instrument_code = 8100
dt_fr = dttool.get_start_of_the_day(dt_fr)
dt_to = dttool.get_start_of_the_day(dt_to)
if sat_id is None:
sat_id = input("input the sat_id (e.g., f16)")
self.sat_id = sat_id
if file_type is None:
file_type = input("Input the file type (s1, s4, or e):")
self.file_type = file_type
if dt_fr == dt_to:
dt_to = dt_to + datetime.timedelta(hours=23, minutes=59)
self.dt_fr = dt_fr # datetime from
self.dt_to = dt_to # datetime to
if data_file_root_dir is None:
self.data_file_root_dir = pfr.datahub_data_root_dir / 'Madrigal' / 'DMSP'
else:
self.data_file_root_dir = data_file_root_dir
self.done = False
self.madrigal_url = "http://cedar.openmadrigal.org/"
self.download_madrigal_files()
def download_madrigal_files(self, download_pp=False):
mylog.simpleinfo.info("Searching data from the Madrigal database ...")
exp_list, _, database = madrigal.utilities.list_experiments(
self.instrument_code, self.dt_fr, self.dt_to, madrigal_url=self.madrigal_url
)
for exp in exp_list:
files = database.getExperimentFiles(exp.id)
for file in files:
if self.file_type == 'hp':
rpattern = data_type_dict[self.file_type]
else:
rpattern = self.sat_id.upper() + '.*' + data_type_dict[self.file_type]
m = re.search(rpattern, file.kindatdesc)
if m is None:
continue
file_path_remote = pathlib.Path(file.name)
file_name = file_path_remote.name
m = re.search(r'([\d]{8})', file_name)
dtstr = m.group()
thisday = datetime.datetime.strptime(dtstr, "%Y%m%d")
data_file_dir = self.data_file_root_dir / thisday.strftime("%Y%m") / thisday.strftime('%Y%m%d')
data_file_dir.mkdir(parents=True, exist_ok=True)
data_file_path = data_file_dir / file_name
if data_file_path.is_file():
mylog.simpleinfo.info("The file {} has been downloaded.".format(data_file_path.name))
continue
mylog.simpleinfo.info("Downloading {} from the Madrigal database ...".format(file_name))
database.downloadFile(
file_path_remote, data_file_path,
self.user_fullname, self.user_email, self.user_affiliation,
"hdf5"
)
self.done = True
mylog.simpleinfo.info("Save the file as {} in the directory {}".format(file_name, data_file_dir))
if __name__ == "__main__":
test()
```
#### File: madrigal/eiscat/downloader.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
"""
To download the EISCAT quickplots and analyzed data archived in http://portal.eiscat.se/schedule/schedule.cgi
By Lei Cai on 2021.04.01
"""
import datetime
from dateutil.relativedelta import relativedelta
from dateutil.parser import parse as dtparse
import numpy as np
import re
import requests
import bs4
import os
import pathlib
from geospacelab import preferences as pfr
import geospacelab.datahub.sources.madrigal as madrigal
import geospacelab.toolbox.utilities.pylogging as mylog
import geospacelab.toolbox.utilities.pydatetime as dttool
def test():
sites = ['VHF']
dt_fr = datetime.datetime(2010, 1, 1,)
dt_to = datetime.datetime(2021, 12, 31)
download_obj = Downloader(dt_fr, dt_to, sites=sites, kind_data="madrigal")
# schedule = EISCATSchedule(dt_fr=dt_fr, dt_to=dt_to)
# schedule.to_txt()
class Downloader(object):
"""Download the quickplots and archieved analyzed results from EISCAT schedule webpage
"""
def __init__(self, dt_fr, dt_to, sites=None, data_file_root_dir=None, kind_data="EISCAT", download_pp=False,
user_fullname=madrigal.default_user_fullname,
user_email=madrigal.default_user_email,
user_affiliation=madrigal.default_user_affiliation):
self.user_fullname = user_fullname
self.user_email = user_email
self.user_affiliation = user_affiliation
dt_fr = dttool.get_start_of_the_day(dt_fr)
dt_to = dttool.get_start_of_the_day(dt_to)
if dt_fr == dt_to:
dt_to = dt_to + datetime.timedelta(hours=23, minutes=59)
self.dt_fr = dt_fr # datetime from
self.dt_to = dt_to # datetime to
if sites is None:
sites = ['UHF', 'VHF', 'KIR', 'SOD',
'ESR'] # site list: default is the list of all the sites, use e.g., ['UHF'] for single site
self.sites = sites
if data_file_root_dir is None:
self.data_file_root_dir = pfr.datahub_data_root_dir / 'Madrigal' / 'EISCAT' / 'analyzed'
else:
self.data_file_root_dir = data_file_root_dir
self.done = False
if kind_data.lower() == "madrigal":
self.madrigal_url = "https://madrigal.eiscat.se/madrigal/"
self.download_madrigal_files(download_pp=download_pp)
elif kind_data.lower() == "eiscat":
self.url_base = 'http://portal.eiscat.se' # EISCAT schedule webpage
self.urls = []
self.search_scheduled_url()
self.download_eiscat_files()
def search_scheduled_url(self):
"""
Search available urls
:return: None
"""
diff_month = (self.dt_to.year - self.dt_fr.year) * 12 + self.dt_to.month - self.dt_fr.month
urls = set()
for nm in range(diff_month + 1):
dt_now = datetime.datetime(self.dt_fr.year, self.dt_fr.month, 1) + relativedelta(months=nm)
payload_options = []
for site in self.sites:
payload_options.append(site)
if site == 'UHF':
payload_options.append('TRO') # full list: VHF=on&UHF=on&TRO=on&KIR=on&SOD=on&ESR=on&HEA=on
payload = {
'year': "{:04d}".format(dt_now.year),
'month': '{:d}'.format(dt_now.month),
'A': 'on'
}
for opt in payload_options:
payload[opt] = 'on'
r = requests.get(self.url_base + '/schedule/schedule.cgi', params=payload)
soup = bs4.BeautifulSoup(r.text, 'html.parser')
strong_tags = soup.find_all('strong')
for stag in strong_tags:
yymm = dt_now.strftime("%Y:%m")
if stag.string is None or yymm not in stag.string:
continue
tagline = stag.sourceline
# print(stag.string)
content = r.text.split('\n')[tagline - 1]
# print(content)
soup_line = bs4.BeautifulSoup(content, 'html.parser')
link = soup_line.find('a', href=True)['href']
if 'experiment_list' not in link:
print('No link for the existing experiment ({})'.format(link))
continue
urls.add(link)
self.urls = urls
return None
def download_eiscat_files(self):
"""
Download the files
"""
if not self.urls:
mylog.StreamLogger.info("No experiments available!")
return None
for url in self.urls:
cookies = {
'user_email': self.user_email,
'user_fullname': self.user_fullname,
'user_affiliation': self.user_affiliation
}
r = requests.get(url, cookies=cookies)
soup = bs4.BeautifulSoup(r.text, 'html.parser')
links = soup.find_all('a', href=True)
for link in links:
href = link['href']
if any(href.endswith(s) for s in ['.png', '.tar.gz', '.hdf5']):
filename = href.split('/')[-1]
match = re.search(r'\d{4}-\d{2}-\d{2}', filename)
thisday = datetime.datetime.strptime(match.group(), '%Y-%m-%d')
if thisday < self.dt_fr or thisday > self.dt_to:
continue
for key, value in locs.items():
if '/' + value + '/' in href:
site = key
#site1 = re.search("[@|_][a-zA-Z0-9]*[.]", filename).group(0)[1:-1]
search_pattern = re.search("\d{4}-\d{2}-\d{2}_[a-zA-Z0-9]*", filename).group(0)
sub_dir = search_pattern + '@' + site
data_file_dir = self.data_file_root_dir / site / thisday.strftime('%Y') / sub_dir
data_file_dir.mkdir(parents=True, exist_ok=True)
remote_file = requests.get(href)
file_path = data_file_dir / filename
if file_path.is_file():
print("The file {} has been downloaded.".format(filename))
continue
mylog.simpleinfo.info(
'Downloading "{} from portal.eiscat.se to {} ..."'.format(filename, data_file_dir)
)
with open(file_path, "wb") as eiscat:
eiscat.write(remote_file.content)
mylog.simpleinfo.info('Done!')
self.done = True
return
def download_madrigal_files(self, download_pp=False):
icodes = []
for site in self.sites:
icodes.extend(instrument_codes[site])
for icode in icodes:
exp_list, _, database = madrigal.utilities.list_experiments(
icode, self.dt_fr, self.dt_to, madrigal_url=self.madrigal_url)
for exp in exp_list:
files = database.getExperimentFiles(exp.id)
for file in files:
if not download_pp and 'GUISDAP pp' in file.kindatdesc:
continue
file_path = pathlib.Path(file.name)
site = file_path.name.split("@")[1][0:3].upper()
if '32' in site or '42' in site:
site = 'ESR'
match = re.search(r'\d{4}-\d{2}-\d{2}', file_path.name)
dt_str = match.group(0)
thisday = datetime.datetime.strptime(dt_str, "%Y-%m-%d")
if thisday < self.dt_fr or thisday > self.dt_to:
continue
# sub_dir = file_path.name.split('_', maxsplit=1)[1]
search_pattern = re.search("\d{4}-\d{2}-\d{2}_[a-zA-Z0-9]*", file_path.name).group(0)
sub_dir = search_pattern + '@' + site
data_file_dir = self.data_file_root_dir / site / dt_str[0:4] / sub_dir
data_file_dir.mkdir(parents=True, exist_ok=True)
data_file_path = data_file_dir / file_path.name
if data_file_path.is_file():
mylog.simpleinfo.info("The file {} has been downloaded.".format(data_file_path.name))
continue
mylog.simpleinfo.info("Downloading {} from the Madrigal database ...".format(file_path.name))
database.downloadFile(
file_path, data_file_path,
self.user_fullname, self.user_email, self.user_affiliation,
"hdf5"
)
self.done = True
mylog.simpleinfo.info("Done!")
# fhdf5 = h5py.File(outDir + fn, 'r')
instrument_codes = {
'UHF': [72],
'VHF': [74],
'ESR': [95],
'SOD': [73, 76],
'KIR': [71, 75]
}
locs = {
'UHF': 'tro',
'VHF': 'eis',
'ESR': 'lyr',
'SOD': 'sod',
'KIR': 'kir'
}
class EISCATSchedule(object):
""" Analyze EISCAT schedule
"""
def __init__(self, dt_fr, dt_to, sites=None, root_filepath=None, monthly=False):
self.url_base = 'http://portal.eiscat.se' # EISCAT schedule webpage
self.dt_fr = dt_fr
self.dt_to = dt_to # datetime
if sites is None:
sites = ['UHF', 'VHF', 'KIR', 'SOD',
'ESR'] # site list: default is the list of all the sites, use e.g., ['UHF'] for single site
self.sites = sites
self.rs = []
self.experiments = []
self.urls = []
if root_filepath is None:
root_filepath = pfr.datahub_data_root_dir  # root path for the downloaded files; defaults to the GeospaceLab datahub data root
self.root_filepath = root_filepath
self.monthly = monthly # True: download the whole month data. False: only for the specific date
self.search_archives()
self.analyze_archives()
# self.to_txt()
def search_archives(self):
"""
Search available urls
:return: None
"""
diff_month = (self.dt_to.year - self.dt_fr.year) * 12 + self.dt_to.month - self.dt_fr.month
for nm in range(diff_month + 1):
dt_now = datetime.datetime(self.dt_fr.year, self.dt_fr.month, 1) + relativedelta(months=nm)
payload_options = []
for site in self.sites:
payload_options.append(site)
if site == 'UHF':
payload_options.append('TRO') # full list: VHF=on&UHF=on&TRO=on&KIR=on&SOD=on&ESR=on&HEA=on
payload = {
'year': "{:04d}".format(dt_now.year),
'month': '{:d}'.format(dt_now.month),
'A': 'on'
}
for opt in payload_options:
payload[opt] = 'on'
r = requests.get(self.url_base + '/schedule/schedule.cgi', params=payload)
self.rs.append(r)
def analyze_archives(self):
nexp = 0
for r in self.rs:
soup = bs4.BeautifulSoup(r.text, 'html.parser')
schedule = soup.pre.text.split('\n')
for item_text in schedule:
if not is_date(item_text[0:10].replace(':', '-')):
continue
dt_now = datetime.datetime.strptime(item_text[0:10], '%Y:%m:%d')
if not self.monthly:
if dt_now < self.dt_fr or dt_now > self.dt_to:
continue
time_rec = item_text[15:63]
inds = [x for x, v in enumerate(time_rec) if v == 'A']
if not inds:
continue
info = item_text[65:-1].split(' ')
# print(dt_fr)
# print(info)
antenna = info[0]
exp = info[1]
info = item_text[65:-1].split(')')[-1].strip()
info_splits = info.split('_')
code = info_splits[0]
mode = ''
version = ''
if len(info_splits) > 1:
mode = info_splits[1]
if len(info_splits) > 2:
version = info_splits[2]
# try:
# code = info.split('_')[0]
# mode = info.split('_')[1]
# version = info.split('_')[2]
# except IndexError:
# print('Not a standard experiment {}'.format(info))
# continue
ind_diff = np.diff(inds)
ind_gaps = np.where(ind_diff > 3)[0] # time difference greater than 1.5 h
ind_ranges = []
for ind_, ind_gap in enumerate(ind_gaps):
if ind_ == 0:
ind_fr = inds[0]
ind_to = inds[ind_gap]
else:
ind_fr = inds[ind_gaps[ind_ - 1] + 1]
ind_to = inds[ind_gap]
ind_ranges.append([ind_fr, ind_to])
if ind_gaps.size:
ind_ranges.append([inds[ind_gaps[-1] + 1], inds[-1]])
else:
ind_ranges.append([inds[0], inds[-1]])
for ind_range in ind_ranges:
ind_1 = ind_range[0]
ind_2 = ind_range[1]
dt_fr = dt_now + datetime.timedelta(hours=ind_1 / 2)
HH = (ind_2 + 1) / 2
if HH >= 24:
HH = 23
MM = 59
else:
MM = 0
dt_to = dt_now + datetime.timedelta(hours=HH, minutes=MM)
suffix = 'OK'
if time_rec.count('A') < (ind_2 - ind_1 + 1):
suffix = '?'
self.experiments.append([dt_fr, dt_to, antenna, code, mode, version, exp, suffix, time_rec])
nexp = nexp + 1
if nexp == 0:
print('No available archived experiments!')
return False
return True
def to_txt(self, file_name=None, file_path=None):
if file_name is None:
file_name = "EISCAT_archives.txt"
if file_path is None:
file_path = self.root_filepath
with open(os.path.join(file_path, file_name), 'w') as out_file:
for line in self.experiments:
line_str = "{0:<17s}{1:<7s}{2:<5s}{3:<6s}{4:<20s}{5:<25s}{6:<20s}{7:<30s}{8:<65s}".format(
line[0].strftime('%Y-%m-%d %H:%M'), line[1].strftime('%H:%M'),
line[7], line[2], line[3], line[4], line[5], line[6], line[8]) + '\n'
out_file.write(line_str)
def is_date(string, fuzzy=False):
"""
Return whether the string can be interpreted as a date.
:param string: str, string to check for date
:param fuzzy: bool, ignore unknown tokens in string if True
"""
try:
dtparse(string, fuzzy=fuzzy)
return True
except ValueError:
return False
if __name__ == "__main__":
test()
```
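In `analyze_archives`, each schedule row carries a 48-character record in which every character covers 30 minutes and 'A' marks an archived slot; runs of 'A' separated by an index gap greater than 3 (more than 1.5 h) become separate experiments. A decoupled sketch of that bookkeeping (the function name is illustrative):
```python
import datetime
import numpy as np

def a_runs_to_ranges(day, time_rec):
    # each of the 48 characters covers 30 minutes; 'A' marks an archived slot
    inds = [i for i, ch in enumerate(time_rec) if ch == 'A']
    if not inds:
        return []
    gaps = np.where(np.diff(inds) > 3)[0]  # > 1.5 h between slots starts a new range
    bounds, start = [], inds[0]
    for g in gaps:
        bounds.append((start, inds[g]))
        start = inds[g + 1]
    bounds.append((start, inds[-1]))
    return [(day + datetime.timedelta(hours=i1 / 2),
             day + datetime.timedelta(hours=(i2 + 1) / 2)) for i1, i2 in bounds]

day = datetime.datetime(2016, 3, 15)
rec = 'A' * 4 + ' ' * 20 + 'A' * 6 + ' ' * 18
print(a_runs_to_ranges(day, rec))  # two ranges: 00:00-02:00 and 12:00-15:00
```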
#### File: sources/madrigal/__init__.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
from geospacelab import preferences
import geospacelab.datahub.sources.madrigal.utilities as utilities
from geospacelab.datahub import DatabaseModel
class MadrigalDatabase(DatabaseModel):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in, **kwargs)
return obj
madrigal_database = MadrigalDatabase('Madrigal')
madrigal_database.url = 'http://cedar.openmadrigal.org/'
madrigal_database.category = 'online database'
try:
default_user_fullname = preferences.user_config['datahub']['madrigal']['user_fullname']
default_user_email = preferences.user_config['datahub']['madrigal']['user_email']
default_user_affiliation = preferences.user_config['datahub']['madrigal']['user_affiliation']
except KeyError:
print("Inputs for accessing the Madrigal database.")
if preferences._on_rtd:
default_user_fullname = 'GeospaceLab'
default_user_email = '<EMAIL>'
default_user_affiliation = 'GeospaceLab'
save = 'y'
else:
default_user_fullname = input("User's full name: ")
default_user_email = input("User's email: ")
default_user_affiliation = input("User's affiliation: ")
save = input("Save as default? [y]/n: ")
if save.lower() in ['', 'y', 'yes']:
uc = preferences.user_config
uc.setdefault('datahub', {})
uc['datahub'].setdefault('madrigal', {})
uc['datahub']['madrigal']['user_fullname'] = default_user_fullname
uc['datahub']['madrigal']['user_email'] = default_user_email
uc['datahub']['madrigal']['user_affiliation'] = default_user_affiliation
preferences.set_user_config(user_config=uc, set_as_default=True)
```
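After the first interactive run the credentials are persisted, so later imports skip the prompts. A sketch of the nested shape that `preferences.user_config` ends up with (values are placeholders):
```python
user_config = {
    'datahub': {
        'madrigal': {
            'user_fullname': '<your full name>',
            'user_email': '<your email>',
            'user_affiliation': '<your affiliation>',
        }
    }
}
```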
#### File: sources/superdarn/__init__.py
```python
from geospacelab import preferences
from geospacelab.datahub import DatabaseModel
class SuperDARNDatabase(DatabaseModel):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in, **kwargs)
return obj
superdarn_database = SuperDARNDatabase('SuperDARN')
superdarn_database.url = 'http://vt.superdarn.org/tiki-index.php'
superdarn_database.category = 'online database'
superdarn_database.notes = ''
```
#### File: superdarn/potmap/downloader.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import datetime
import numpy as np
import requests
import bs4
import pathlib
import re
import netCDF4 as nc
import cftime
import ftplib
from contextlib import closing
import geospacelab.toolbox.utilities.pydatetime as dttool
import geospacelab.toolbox.utilities.pylogging as mylog
import geospacelab.datahub.sources.wdc as wdc
from geospacelab import preferences as prf
class Downloader(object):
def __init__(self, dt_fr, dt_to, data_res=None, pole='N', data_file_root_dir=None):
self.dt_fr = dt_fr
self.dt_to = dt_to
self.done = False
if data_res is None:
data_res = 2 # in minutes
self.data_res = data_res
self.pole = pole
if data_file_root_dir is None:
self.data_file_root_dir = prf.datahub_data_root_dir / 'SuperDARN' / 'PotentialMap'
else:
self.data_file_root_dir = pathlib.Path(data_file_root_dir)
self.download()
def download(self):
diff_days = dttool.get_diff_days(self.dt_fr, self.dt_to)
for i in range(diff_days + 1):
dt1 = self.dt_fr + datetime.timedelta(days=i)
fn = '_'.join(['SuperDARN', 'POTMAP', str(self.data_res) + 'min', dt1.strftime('%Y%m%d'), self.pole]) + '.dat'
file_path = self.data_file_root_dir / dt1.strftime('%Y') / fn
self.save_to_netcdf(file_path)
def save_to_netcdf(self, file_path):
with open(file_path, 'r') as f:
text = f.read()
results = re.findall(
r'^\s*(\d+)\s*\[(\d+),(\d+)]\s*([-\d.]+)\s*' +
r'([-\d.]+)\s*([-\d.]+)\s*([-\d.]+)\s*([-\d.]+)\s*([-\d.]+)\s*([-\d.]+)\s*' +
r'([\S]+)',
text,
re.M
)
results = list(zip(*results))
nlat = 40
nlon = 180
ntime = len(results[0]) / nlon / nlat
if ntime != int(ntime):
raise ValueError("The parsed record count does not divide into a whole number of time steps")
ntime = int(ntime)
mlat_arr = np.array(results[3]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
mlon_arr = np.array(results[4]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
EF_N_arr = np.array(results[5]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
EF_E_arr = np.array(results[6]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
v_N_arr = np.array(results[7]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
v_E_arr = np.array(results[8]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
phi_arr = np.array(results[9]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
dts = np.array(results[10])[::nlon * nlat]
dts = [datetime.datetime.strptime(dtstr, "%Y-%m-%d/%H:%M:%S") for dtstr in dts]
time_array = np.array(cftime.date2num(dts, units='seconds since 1970-01-01 00:00:00.0'))
import aacgmv2
mlt_arr = np.empty_like(mlat_arr)
for i in range(ntime):
mlt1 = aacgmv2.convert_mlt(mlon_arr[i].flatten(), dts[i]).reshape((nlon, nlat))
mlt_arr[i, ::] = mlt1[::]
fp = pathlib.Path(file_path.with_suffix('.nc'))
fp.parent.resolve().mkdir(parents=True, exist_ok=True)
fnc = nc.Dataset(fp, 'w')
fnc.createDimension('UNIX_TIME', ntime)
fnc.createDimension('MLAT', nlat)
fnc.createDimension('MLON', nlon)
fnc.title = "SuperDARN Potential maps"
time = fnc.createVariable('UNIX_TIME', np.float64, ('UNIX_TIME',))
time.units = 'seconds since 1970-01-01 00:00:00.0'
time[::] = time_array[::]
mlat = fnc.createVariable('MLAT', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
mlat[::] = mlat_arr[::]
mlon = fnc.createVariable('MLON', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
mlon[::] = mlon_arr[::]
mlt = fnc.createVariable('MLT', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
mlt[::] = mlt_arr[::]
EF_N = fnc.createVariable('E_N', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
EF_N[::] = EF_N_arr[::]
EF_E = fnc.createVariable('E_E', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
EF_E[::] = EF_E_arr[::]
v_N = fnc.createVariable('v_i_N', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
v_N[::] = v_N_arr[::]
v_E = fnc.createVariable('v_i_E', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
v_E[::] = v_E_arr[::]
phi = fnc.createVariable('phi', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
phi[::] = phi_arr[::]
print('From {} to {}.'.format(
datetime.datetime.utcfromtimestamp(time_array[0]),
datetime.datetime.utcfromtimestamp(time_array[-1]))
)
mylog.StreamLogger.info(
"The requested SuperDARN map potential data has been saved in the file {}.".format(fp))
fnc.close()
self.done = True
if __name__ == "__main__":
dt_fr1 = datetime.datetime(2016, 3, 15)
dt_to1 = datetime.datetime(2016, 3, 15)
Downloader(dt_fr1, dt_to1)
```
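The reshape/transpose in `save_to_netcdf` is the subtle step: a C-order reshape recovers the (time, lat, lon) record order of the .dat file, and the transpose swaps the last two axes to match the ('UNIX_TIME', 'MLON', 'MLAT') dimension order of the netCDF variables. A toy sketch of that index gymnastics:
```python
import numpy as np

ntime, nlat, nlon = 2, 40, 180
flat = np.arange(ntime * nlat * nlon, dtype=np.float32)  # stand-in for one parsed column

arr = flat.reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1))
print(arr.shape)  # (2, 180, 40), i.e. (UNIX_TIME, MLON, MLAT)
```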
#### File: sources/wdc/__init__.py
```python
from geospacelab import preferences
import geospacelab.datahub.sources.madrigal.utilities as utilities
from geospacelab.datahub import DatabaseModel
class WDCDatabase(DatabaseModel):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in, **kwargs)
return obj
wdc_database = WDCDatabase('WDC')
wdc_database.url = 'http://wdc.kugi.kyoto-u.ac.jp/'
wdc_database.category = 'online database'
wdc_database.notes = '''
- Data Usage Rules
The rules for the data use and exchange are defined by the International Council for Science - World Data System (ICSU-WDS) Data Sharing Principles.
The data and services at the WDC Kyoto are available for scientific use without restrictions,
but for the real-time (quicklook) data, please contact our staff (<EMAIL>) before using them in publications and presentations.
The WDC Kyoto does not allow commercial applications of the geomagnetic indices.
'''
try:
default_user_email = preferences.user_config['datahub']['wdc']['user_email']
except KeyError:
if preferences._on_rtd:
default_user_email = '<EMAIL>'
save = 'y'
else:
print("Inputs for accessing the WDC (wdc.kugi.kyoto-u.ac.jp) database.")
default_user_email = input("User's email: ")
save = input("Save as default? [y]/n: ")
if save.lower() in ['', 'y', 'yes']:
uc = preferences.user_config
uc.setdefault('datahub', {})
uc['datahub'].setdefault('wdc', {})
uc['datahub']['wdc']['user_email'] = default_user_email
preferences.set_user_config(user_config=uc, set_as_default=True)
```
#### File: empiricalmodels/ovationprime2010/ovationprime.py
```python
import os
import datetime
from collections import OrderedDict
import numpy as np
from scipy import interpolate
# These imports are needed by LatLocaltimeInterpolator and ConductanceEstimator
# below; the paths follow the upstream OvationPyme package:
from ovationprime import ovation_utilities
from ovationprime.ovation_utilities import robinson_auroral_conductance
from ovationprime.ovation_utilities import brekke_moen_solar_conductance
from geospacepy import satplottools
import aacgmv2 # available on pip
# import apexpy
from logbook import Logger
log = Logger('OvationPyme.ovation_prime')
# Determine where this module's source file is located
# to determine where to look for the tables
src_file_dir = os.path.dirname(os.path.realpath(__file__))
ovation_datadir = os.path.join(src_file_dir, 'data')
def _check_for_old_jtype(estimator, type_of_flux):
"""Check the type of flux (2nd constructor argument) of
a FluxEstimator or SeasonalFluxEstimator class and
raise a detailed error to inform the user that they need
to modify their calling code, and why"""
name = estimator.__class__.__name__
explanation = ('Constructor interface to {} has changed;'.format(name)
+ ' now the only valid second argument values'
+ ' (for type of flux) are "energy" or "number".\n'
+ ' Formerly, the second argument could take values'
+ ' which confused types of flux with types of aurora'
+ ' (e.g. you could specify ion or electron, which'
+ ' is a property of the auroral type; choose the "ion"'
+ ' auroral type to get ion fluxes).\n'
+ ' If you wish to calculate average energy, you'
+ ' will need to switch from a FluxEstimator class'
+ ' to an AverageEnergyEstimator class')
if type_of_flux not in ['energy', 'number']:
raise RuntimeError('{} is not a valid fluxtype.\n{}'.format(type_of_flux,
explanation))
class LatLocaltimeInterpolator(object):
def __init__(self, mlat_grid, mlt_grid, var):
self.mlat_orig = mlat_grid
self.mlt_orig = mlt_grid
self.zvar = var
n_north, n_south = np.count_nonzero(self.mlat_orig > 0.), np.count_nonzero(self.mlat_orig < 0.)
if n_south == 0.:
self.hemisphere = 'N'
elif n_north == 0.:
self.hemisphere = 'S'
else:
raise ValueError(
'Latitude grid contains northern (N={0}) and southern (N={1}) values.'.format(n_north, n_south) + \
' Can only interpolate one hemisphere at a time.')
def interpolate(self, new_mlat_grid, new_mlt_grid, method='nearest'):
"""
Rectangularize and interpolate onto a new grid (2D scattered-data interpolation; 'nearest' by default)
"""
X0, Y0 = satplottools.latlt2cart(self.mlat_orig.flatten(), self.mlt_orig.flatten(), self.hemisphere)
X, Y = satplottools.latlt2cart(new_mlat_grid.flatten(), new_mlt_grid.flatten(), self.hemisphere)
interpd_zvar = interpolate.griddata((X0, Y0), self.zvar.flatten(), (X, Y), method=method, fill_value=0.)
return interpd_zvar.reshape(new_mlat_grid.shape)
class BinCorrector(object):
"""
We've found that often there are strange outlier bins that show up in
OvationPyme results. This attempts to identify them by computing a numerical
derivative around each ring of constant latitude.
"""
def __init__(self, mlat_grid, mlt_grid):
self.mlat_grid = mlat_grid
self.mlats = self.mlat_grid[:, 0].flatten()
self.mlt_grid = mlt_grid
self.mlts = self.mlt_grid[0, :].flatten()
self.dy_thresh = None
def fix(self, y_grid, min_mlat=49, max_mlat=75, label=''):
"""
Compute derivatives and attempt to identify bad bins
Assumes mlat varies along the first dimension of the gridded location
arrays
"""
debug = False
plot = False
bad_bins = np.zeros_like(y_grid, dtype=bool)
y_grid_corr = y_grid.copy()
if self.dy_thresh is None:
self.dy_thresh = 3. * np.nanstd(np.diff(y_grid.flatten()))
wraparound = lambda x, nwrap: np.concatenate([x[-1 * (nwrap + 1):-1], x, x[:nwrap]])
for i_mlat, mlat in enumerate(self.mlats):
if not (np.abs(mlat) >= min_mlat and np.abs(mlat) <= max_mlat):
if debug:
log.debug('MLAT ring at {0} mlat is not between'.format(mlat)
+ ' {0} and {1}'.format(min_mlat, max_mlat)
+ ' skipping')
continue
mlts_nowrap = self.mlt_grid[i_mlat, :].copy()
mlts_nowrap[mlts_nowrap < 0] += 24
mlts_nowrap[-1] = 23.9
y = y_grid[i_mlat, :]
# Wrap around the first and last nwrap indices in MLT;
# this prevents out-of-bounds errors in the spline/derivative
nwrap = 4 # Pchip is cubic so order+1
mlts = wraparound(mlts_nowrap, nwrap)
mlts[:nwrap] -= 24. # to keep mlt in increasing order
mlts[-1 * nwrap:] += 24.
y = wraparound(y, nwrap)
# y_i = interpolate.PchipInterpolator(mlts, y)
dy = np.diff(np.concatenate([y[:1], y]))  # first difference (first value repeated so lengths match)
i_dy = interpolate.interp1d(mlts, dy, kind='nearest')
mlt_mask = np.ones_like(mlts, dtype=bool)
for i_mlt, mlt in enumerate(mlts_nowrap.flatten()):
if np.abs(i_dy(mlt)) > self.dy_thresh:
bad_bins[i_mlat, i_mlt] = True
mlt_mask[i_mlt + nwrap] = False
y_corr_i = interpolate.PchipInterpolator(mlts[mlt_mask], y[mlt_mask])
y_corr = y_corr_i(mlts)
y_grid_corr[i_mlat, :] = y_corr_i(mlts_nowrap)
if plot:
self.plot_single_spline(mlat, mlts, y, dy, mlt_mask, y_corr, label=label)
return y_grid_corr
def plot_single_spline(self, mlat, mlts, y, dy, mlt_mask, y_corr, label=''):
import matplotlib.pyplot as plt
f = plt.figure(figsize=(8, 6))
ax = f.add_subplot(111)
ax.plot(mlts, y, 'bo', label='data')
ax.plot(mlts, dy, 'r-', label='Deriv')
bad_bins = np.logical_not(mlt_mask)
ax.plot(mlts, y_corr, 'g.', label='After Correction')
ax.plot(mlts[bad_bins], y[bad_bins], 'rx',
label='Bad@dy>{0:.1f}'.format(self.dy_thresh))
ax.set_title('Spline fit (mlat={0:.1f})'.format(mlat))
ax.set_xlabel('MLT')
ax.legend()
if not os.path.exists('/tmp/ovationpyme'):
os.makedirs('/tmp/ovationpyme')
f.savefig('/tmp/ovationpyme/ovationpyme_spline_{0}_{1}.png'.format(label, np.floor(mlat * 10)))
plt.close(f)
def __call__(self, y):
"""
"""
return self.fix(y)
class ConductanceEstimator(object):
"""
Implements the 'Robinson Formula'
for estimating Pedersen and Hall height integrated
conductivity (conductance) from
average electron energy and
total electron energy flux
(assumes a Maxwellian electron energy distribution)
"""
def __init__(self, fluxtypes=['diff']):
# Use diffuse aurora only
self.numflux_estimator = {}
self.eavg_estimator = {}
for fluxtype in fluxtypes:
self.numflux_estimator[fluxtype] = FluxEstimator(fluxtype, 'number')
self.eavg_estimator[fluxtype] = AverageEnergyEstimator(fluxtype)
def get_conductance(self, dt, hemi='N', solar=True, auroral=True, background_p=None, background_h=None,
conductance_fluxtypes=['diff'], interp_bad_bins=True,
return_dF=False, return_f107=False,
dnflux_bad_thresh=1.0e8, deavg_bad_thresh=.3):
"""
Compute total conductance using the Robinson formula and an empirical solar conductance model
"""
log.notice(
"Getting conductance with solar {0}, aurora {1}, fluxtypes {2}, background_ped: {3}, background_hall {4}".format(
solar,
auroral, conductance_fluxtypes, background_p, background_h))
all_sigp_auroral, all_sigh_auroral = [], []
# Create a bin interpolation corrector
for fluxtype in conductance_fluxtypes:
mlat_grid, mlt_grid, numflux_grid, dF = self.numflux_estimator[fluxtype].get_flux_for_time(dt, hemi=hemi,
return_dF=True)
# mlat_grid, mlt_grid, energyflux_grid = self.energyflux_estimator.get_flux_for_time(dt, hemi=hemi)
mlat_grid, mlt_grid, eavg_grid = self.eavg_estimator[fluxtype].get_eavg_for_time(dt, hemi=hemi)
if interp_bad_bins:
# Clean up any extremely large bins
fixer = BinCorrector(mlat_grid, mlt_grid)
# Fix numflux
fixer.dy_thresh = dnflux_bad_thresh
numflux_grid = fixer.fix(numflux_grid, label='nflux_{0}'.format(fluxtype))
# Fix avg energy
fixer.dy_thresh = deavg_bad_thresh
eavg_grid = fixer.fix(eavg_grid, label='eavg_{0}'.format(fluxtype))
# zero out the lowest-latitude numflux rows because they make no sense;
# there is some kind of artefact post-midnight
bad = np.abs(mlat_grid) < 52.0
numflux_grid[bad] = 0.
# raise RuntimeError('Debug stop!')
aur_conds = robinson_auroral_conductance(numflux_grid, eavg_grid)
this_sigp_auroral, this_sigh_auroral = aur_conds
all_sigp_auroral.append(this_sigp_auroral)
all_sigh_auroral.append(this_sigh_auroral)
sigp_solar, sigh_solar, f107 = self.solar_conductance(dt, mlat_grid, mlt_grid, return_f107=True)
total_sigp_sqrd = np.zeros_like(sigp_solar)
total_sigh_sqrd = np.zeros_like(sigh_solar)
if solar:
total_sigp_sqrd += sigp_solar ** 2
total_sigh_sqrd += sigh_solar ** 2
if auroral:
# Sum up all contributions (sqrt of summed squares)
for sigp_auroral, sigh_auroral in zip(all_sigp_auroral, all_sigh_auroral):
total_sigp_sqrd += sigp_auroral ** 2
total_sigh_sqrd += sigh_auroral ** 2
# sigp_auroral *= 1.5
# sigh_auroral *= 1.5
# Now take square root to get hall and pedersen conductance
if solar or auroral:
sigp = np.sqrt(total_sigp_sqrd)
sigh = np.sqrt(total_sigh_sqrd)
else:
# No conductance except flat background
sigp = total_sigp_sqrd
sigh = total_sigh_sqrd
if background_h is not None and background_p is not None:
# Cousins et. al. 2015, nightside artificial background of 4 Siemens
# Ellen found this to be the background nightside conductance level which
# best optimizes the SuperDARN ElePot AMIE ability to predict AMPERE deltaB data, and
# the AMPERE MagPot AMIE ability to predict SuperDARN LOS V
sigp[sigp < background_p] = background_p
sigh[sigh < background_h] = background_h
if return_dF and return_f107:
return mlat_grid, mlt_grid, sigp, sigh, dF, f107
elif return_dF:
return mlat_grid, mlt_grid, sigp, sigh, dF
elif return_f107:
return mlat_grid, mlt_grid, sigp, sigh, f107
else:
return mlat_grid, mlt_grid, sigp, sigh
def solar_conductance(self, dt, mlats, mlts, return_f107=False):
"""
Estimate the solar conductance using methods from:
Cousins, <NAME>., <NAME>, and <NAME> (2015), Mapping
high-latitude ionospheric electrodynamics with SuperDARN and AMPERE
--which cites--
<NAME>, <NAME>, Observations of high latitude ionospheric conductances
May not be valid for the solar zenith angle in the southern hemisphere (uncertain).
Going to use absolute value of latitude because that's what's done
in Cousins IDL code.
"""
# Find the closest hourly f107 value
# to the current time to specify the conductance
f107 = ovation_utilities.get_daily_f107(dt)
if hasattr(self, '_f107'):
log.warning(('Warning: Overriding real F107 {0} '.format(f107)
+ 'with secret instance property _f107 {0}; '.format(self._f107)
+ 'this is for debugging and will not '
+ 'produce accurate results for a particular date.'))
f107 = self._f107
# print "F10.7 = %f" % (f107)
# Convert from magnetic to geocentric using the AACGMv2 python library
flatmlats, flatmlts = mlats.flatten(), mlts.flatten()
flatmlons = aacgmv2.convert_mlt(flatmlts, dt, m2a=True)
try:
glats, glons = aacgmv2.convert(flatmlats, flatmlons, 110. * np.ones_like(flatmlats),
date=dt, a2g=True, geocentric=False)
except AttributeError:
# convert method was deprecated
glats, glons, r = aacgmv2.convert_latlon_arr(flatmlats,
flatmlons,
110.,
dt,
method_code='A2G')
sigp, sigh = brekke_moen_solar_conductance(dt, glats, glons, f107)
sigp_unflat = sigp.reshape(mlats.shape)
sigh_unflat = sigh.reshape(mlats.shape)
if return_f107:
return sigp_unflat, sigh_unflat, f107
else:
return sigp_unflat, sigh_unflat
class AverageEnergyEstimator(object):
"""A class which estimates average energy by estimating both
energy and number flux
"""
def __init__(self, atype, numflux_threshold=5.0e7):
self.numflux_threshold = numflux_threshold
self.numflux_estimator = FluxEstimator(atype, 'number')
self.energyflux_estimator = FluxEstimator(atype, 'energy')
def get_eavg_for_time(self, dt, hemi='N', return_dF=False, combine_hemispheres=True):
kwargs = {
'hemi': hemi,
'combine_hemispheres': combine_hemispheres,
'return_dF': True
}
grid_mlats, grid_mlts, gridnumflux, dF = self.numflux_estimator.get_flux_for_time(dt, **kwargs)
grid_mlats, grid_mlts, gridenergyflux, dF = self.energyflux_estimator.get_flux_for_time(dt, **kwargs)
grideavg = (gridenergyflux / 1.6e-12) / gridnumflux # energy flux Joules->eV
grideavg = grideavg / 1000. # eV to keV
# Limit to reasonable number fluxes
n_pts = len(grideavg.flatten())
n_low_numflux = np.count_nonzero(gridnumflux < self.numflux_threshold)
grideavg[gridnumflux < self.numflux_threshold] = 0.
log.debug(('Zeroed {:d}/{:d} average energies '.format(n_low_numflux, n_pts)
+ 'with numflux below {:e}'.format(self.numflux_threshold)))
# Limit to DMSP SSJ channels range
n_over = np.count_nonzero(grideavg > 30)
n_under = np.count_nonzero(grideavg < .5)
log.debug('Zeroed {:d}/{:d} average energies over 30 keV'.format(n_over, n_pts))
log.debug('Zeroed {:d}/{:d} average energies under .2 keV'.format(n_under, n_pts))
grideavg[grideavg > 30.] = 30. # Max of 30keV
grideavg[grideavg < .2] = 0. # Min of 1 keV
if not return_dF:
return grid_mlats, grid_mlts, grideavg
else:
return grid_mlats, grid_mlts, grideavg, dF
class FluxEstimator(object):
"""
A class which estimates auroral flux
based on the Ovation Prime regressions,
at arbitrary locations and times.
Locations are in magnetic latitude and local
time, and are interpolated using a B-spline
representation
"""
def __init__(self, atype, energy_or_number, seasonal_estimators=None):
"""
doy - int
day of year
atype - str, ['diff','mono','wave','ions']
type of aurora for which to load regression coefficients
energy_or_number - str, ['energy','number']
Type of flux you want to estimate
seasonal_estimators - dict, optional
A dictionary of SeasonalFluxEstimators for seasons
'spring','fall','summer','winter', if you
don't want to create them
(for efficiency across multi-day calls)
"""
self.atype = atype # Type of aurora
# Check for legacy values of this argument
_check_for_old_jtype(self, energy_or_number)
self.energy_or_number = energy_or_number # Type of flux
seasons = ['spring', 'summer', 'fall', 'winter']
if seasonal_estimators is None:
# Make a seasonal estimator for each season with nonzero weight
self.seasonal_flux_estimators = {season: SeasonalFluxEstimator(season, atype, energy_or_number) for season
in seasons}
else:
# Ensure the passed seasonal estimators are appropriate for this atype and flux type
jtype_atype_ok = True
for season, estimator in seasonal_estimators.items():
jtype_atype_ok = jtype_atype_ok and (self.energy_or_number == estimator.energy_or_number
and self.atype == estimator.atype)
if not jtype_atype_ok:
raise RuntimeError(
'Auroral and flux type of SeasonalFluxEstimators do not match {0} and {1}!'.format(self.atype,
self.energy_or_number))
def season_weights(self, doy):
"""
Determines the relative weighting of the
model coefficients for the various seasons for a particular
day of year (doy). Nominally, weights the seasons
based on the difference between the doy and the peak
of the season (solstice/equinox)
Returns:
a dictionary with a key for each season.
Each value in the dictionary is a float between 0 and 1
"""
weight = OrderedDict(winter=0.,
spring=0.,
summer=0.,
fall=0.)
if doy >= 79. and doy < 171:
weight['summer'] = 1. - (171. - doy) / 92.
weight['spring'] = 1. - weight['summer']
elif doy >= 171. and doy < 263.:
weight['fall'] = 1. - (263. - doy) / 92.
weight['summer'] = 1. - weight['fall']
elif doy >= 263. and doy < 354.:
weight['winter'] = 1. - (354. - doy) / 91.
weight['fall'] = 1. - weight['winter']
elif doy >= 354 or doy < 79:
# For days of year > 354, subtract 365 to get negative
# day of year values for computation
doy0 = doy - 365. if doy >= 354 else doy
weight['spring'] = 1. - (79. - doy0) / 90.
weight['winter'] = 1. - weight['spring']
return weight
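# Worked example (a sketch): doy=100 falls in the first branch, giving
# weight['summer'] = 1 - (171 - 100)/92 ≈ 0.228 and weight['spring'] ≈ 0.772,
# with winter and fall staying 0; the nonzero weights always sum to 1.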
def get_season_fluxes(self, dF, weights):
"""
Extract the flux for each season and hemisphere and
store them in a dictionary
Return positive latitudes, since northern and southern
latitude/localtime grids are the same
"""
seasonfluxesN, seasonfluxesS = OrderedDict(), OrderedDict()
gridmlats, gridmlts = None, None
for season, estimator in self.seasonal_flux_estimators.items():
if weights[season] == 0.:
continue # Skip calculation for seasons with zero weight
flux_outs = estimator.get_gridded_flux(dF)
gridmlatsN, gridmltsN, gridfluxN = flux_outs[:3]
gridmlatsS, gridmltsS, gridfluxS = flux_outs[3:]
seasonfluxesN[season] = gridfluxN
seasonfluxesS[season] = gridfluxS
gridmlats = gridmlatsN
gridmlts = gridmltsN
return gridmlats, gridmlts, seasonfluxesN, seasonfluxesS
def get_flux_for_time(self, dt,
hemi='N', return_dF=False, combine_hemispheres=True):
"""
The weighting of the seasonal flux for the different hemispheres
is a bit counterintuitive, but after some investigation of the flux
patterns produced for Northern and Southern hemispheres using a
particular SeasonalFluxEstimator (which in turn reads a particular
season's coefficients file), it seems like the 'summer' coefficients
file contains the northern hemisphere coefficients for Boreal Summer
(roughly May-August) and the southern hemisphere coefficients for
Austral Summer (roughly November-February).
In earlier versions of this code, the season weighting was wrong,
because the code operated on the assumption that 'summer'
meant Boreal summer, and specified a range of dates for the data
used to construct the coefficients.
In the IDL version of this model, the flux produced for
Northern and Southern hemispheres is averaged, with the following
comment on the IDL keyword argument:
;n_or_s=3 for combine north and south. In effect this is the only
;option that works. The result is appropriate to the northern
;hemisphere. To get a result appropriate to the southern hemisphere,
;call with doy = 365 - actual doy
Combining hemispheres is probably necessary because
there are data gaps (particularly in the northern hemisphere dawn)
so this is the default behavior here as well. This can be overriden
by passing combine_hemispheres=False
"""
doy = dt.timetuple().tm_yday
if not combine_hemispheres:
log.warning(('Warning: IDL version of OP2010 always combines hemispheres. '
+ 'Know what you are doing before switching this behavior.'))
if hemi == 'N':
weights = self.season_weights(doy)
elif hemi == 'S':
weights = self.season_weights(365. - doy)
else:
raise ValueError('Invalid hemisphere {0} (use N or S)'.format(hemi))
dF = ovation_utilities.calc_dF(dt)
if hasattr(self, '_dF'):
log.warning(('Warning: Overriding real Newell Coupling {0} '.format(dF)
+ 'with secret instance property _dF {0}; '.format(self._dF)
+ 'this is for debugging and will not '
+ 'produce accurate results for a particular date'))
dF = self._dF
season_fluxes_outs = self.get_season_fluxes(dF, weights)
grid_mlats, grid_mlts, seasonfluxesN, seasonfluxesS = season_fluxes_outs
gridflux = np.zeros_like(grid_mlats)
for season, W in weights.items():
if W == 0.:
continue
gridfluxN = seasonfluxesN[season]
gridfluxS = seasonfluxesS[season]
if combine_hemispheres:
gridflux += W * (gridfluxN + gridfluxS) / 2
elif hemi == 'N':
gridflux += W * gridfluxN
elif hemi == 'S':
gridflux += W * gridfluxS
if hemi == 'S':
grid_mlats = -1. * grid_mlats # by default returns positive latitudes
if not return_dF:
return grid_mlats, grid_mlts, gridflux
else:
return grid_mlats, grid_mlts, gridflux, dF
class SeasonalFluxEstimator(object):
"""
A class to hold and calculate predictions from the regression coefficients
which are tabulated in the data/premodel/{season}_{atype}_*.txt
files.
Given a particular season, type of aurora (one of ['diff', 'mono', 'wave'])
and type of flux, returns the predicted flux.
"""
_valid_atypes = ['diff', 'mono', 'wave', 'ions']
def __init__(self, season, atype, energy_or_number):
"""
season - str,['winter','spring','summer','fall']
season for which to load regression coeffients
atype - str, ['diff','mono','wave','ions']
type of aurora for which to load regression coefficients; ions
are not implemented
energy_or_number - str, ['energy','number']
type of flux you want to estimate
"""
nmlt = 96 # number of mag local times in arrays (resolution of 15 minutes)
nmlat = 160 # number of mag latitudes in arrays (resolution of 1/4 of a degree (.25))
ndF = 12 # number of coupling strength bins
self.n_mlt_bins, self.n_mlat_bins, self.n_dF_bins = nmlt, nmlat, ndF
self.atype = atype
if atype not in self._valid_atypes:
raise ValueError(('Not a valid aurora type {}.'.format(atype)
+ 'valid values {}'.format(self._valid_atypes)))
# Check for legacy values of this argument
_check_for_old_jtype(self, energy_or_number)
self.energy_or_number = energy_or_number
# The mlat bins are organized like -50:-dlat:-90, 50:dlat:90
self.mlats = np.concatenate([np.linspace(-90., -50., self.n_mlat_bins // 2)[::-1],
np.linspace(50., 90., self.n_mlat_bins // 2)])
self.mlts = np.linspace(0., 24., self.n_mlt_bins)
# Determine file names
file_suffix = '_n' if energy_or_number == 'number' else ''
self.afile = os.path.join(ovation_datadir, 'premodel/{0}_{1}{2}.txt'.format(season, atype, file_suffix))
self.pfile = os.path.join(ovation_datadir, 'premodel/{0}_prob_b_{1}.txt'.format(season, atype))
# Default values of the header (purpose not yet understood)
# b1 = 0.
# b2 = 0.
# yend = 1900
# dend = 1
# y0 = 1900
# d0 = 1
# files_done = 0
# sf0 = 0
with open(self.afile, 'r') as f:
aheader = f.readline() # y0,d0,yend,dend,files_done,sf0
# print "Read Auroral Flux Coefficient File %s,\n Header: %s" % (self.afile,aheader)
# Don't know if it will read from where f pointer is after reading header line
adata = np.genfromtxt(f, max_rows=nmlat * nmlt)
# print "First line was %s" % (str(adata[0,:]))
# These are the coefficients for each bin which are used
# in the predicted flux calulation for electron auroral types
# and for ions
self.b1a, self.b2a = np.zeros((nmlt, nmlat)), np.zeros((nmlt, nmlat))
self.b1a.fill(np.nan)
self.b2a.fill(np.nan)
mlt_bin_inds, mlat_bin_inds = adata[:, 0].astype(int), adata[:, 1].astype(int)
self.b1a[mlt_bin_inds, mlat_bin_inds] = adata[:, 2]
self.b2a[mlt_bin_inds, mlat_bin_inds] = adata[:, 3]
self.b1p = np.full((nmlt, nmlat), np.nan)
self.b2p = np.full((nmlt, nmlat), np.nan)
self.prob = np.full((nmlt, nmlat, ndF), np.nan)
# pdata has 2 columns, b1, b2 for first 15361 rows
# pdata has nmlat*nmlt rows (one for each positional bin)
# Electron auroral types also include a probability in their
# predicted flux calculations (related to the probability of
# observing one type of aurora versus another)
if atype in ['diff', 'mono', 'wave']:
with open(self.pfile, 'r') as f:
pheader = f.readline() # y0,d0,yend,dend,files_done,sf0
# Don't know if it will read from where f pointer is after reading header line
pdata_b = np.genfromtxt(f, max_rows=nmlt * nmlat) # 2 columns, b1 and b2
# print "Shape of b1p,b2p should be nmlt*nmlat=%d, is %s" % (nmlt*nmlat,len(pdata_b[:,0]))
pdata_p = np.genfromtxt(f, max_rows=nmlt * nmlat * ndF) # 1 column, pval
# in the file the probability is stored with coupling strength bin
# varying fastest (this is Fortran indexing order)
pdata_p_column_dFbin = pdata_p.reshape((-1, ndF), order='F')
# mlt is first dimension
self.b1p[mlt_bin_inds, mlat_bin_inds] = pdata_b[:, 0]
self.b2p[mlt_bin_inds, mlat_bin_inds] = pdata_b[:, 1]
for idF in range(ndF):
self.prob[mlt_bin_inds, mlat_bin_inds, idF] = pdata_p_column_dFbin[:, idF]
# IDL original read
# readf,20,i,j,b1,b2,rF
# ;; b1a_all(atype, iseason,i,j) = b1
# ;; b2a_all(atype, iseason,i,j) = b2
# adata has 5 columns, mlt bin number, mlat bin number, b1, b2, rF
# adata has nmlat*nmlt rows (one for each positional bin)
def which_dF_bin(self, dF):
"""
Given a coupling strength value, finds the bin it falls into
"""
dFave = 4421. # Magic numbers!
dFstep = dFave / 8.
i_dFbin = np.round(dF / dFstep)
# Range check 0 <= i_dFbin <= n_dF_bins-1
if i_dFbin < 0 or i_dFbin > self.n_dF_bins - 1:
i_dFbin = 0 if i_dFbin < 0 else self.n_dF_bins - 1
return int(i_dFbin)
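# Worked example (a sketch): with dFave = 4421 the bin width is ~552.6, so a
# coupling strength dF = 1200 gives round(1200 / 552.6) = 2; values that would
# round outside [0, n_dF_bins - 1] are clipped to the nearest end bin.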
def prob_estimate(self, dF, i_mlt_bin, i_mlat_bin):
"""
Estimate probability of <something> by using tabulated
linear regression coefficients ( from prob_b files )
WRT coupling strength dF (which are different for each position bin)
If p doesn't come out sensible from the initial regression
(i.e. both regression coefficients are zero),
then tries loading from the probability array. If the value
in the array is zero, then estimates a value using adjacent
coupling strength bins in the probability array
"""
# Look up the regression coefficients
b1, b2 = self.b1p[i_mlt_bin, i_mlat_bin], self.b2p[i_mlt_bin, i_mlat_bin]
p = b1 + b2 * dF # What is this the probability of?
# range check 0<=p<=1
if p < 0. or p > 1.:
p = 1. if p > 1. else 0.
if b1 == 0. and b2 == 0.:
i_dFbin = self.which_dF_bin(dF)
# Get the tabulated probability
p = self.prob[i_mlt_bin, i_mlat_bin, i_dFbin]
if p == 0.:
# If no tabulated probability we must estimate by interpolating
# between adjacent coupling strength bins
i_dFbin_1 = i_dFbin - 1 if i_dFbin > 0 else i_dFbin + 2 # one dF bin before by preference, two after in extremis
i_dFbin_2 = i_dFbin + 1 if i_dFbin < self.n_dF_bins - 1 else i_dFbin - 2 # one dF bin after by preference, two before in extremis
p = (self.prob[i_mlt_bin, i_mlat_bin, i_dFbin_1] + self.prob[i_mlt_bin, i_mlat_bin, i_dFbin_2]) / 2.
return p
def estimate_auroral_flux(self, dF, i_mlt_bin, i_mlat_bin):
"""
Does what it says on the tin,
estimates the flux using the regression coefficients in the 'a' files
"""
b1, b2 = self.b1a[i_mlt_bin, i_mlat_bin], self.b2a[i_mlt_bin, i_mlat_bin]
# There are no spectral types for ions, so there is no need
# to weight the predicted flux by a probability
if self.atype == 'ions':
p = 1.
else:
p = self.prob_estimate(dF, i_mlt_bin, i_mlat_bin)
# print(p, b1, b2, dF)
flux = (b1 + b2 * dF) * p
return self.correct_flux(flux)
def correct_flux(self, flux):
"""
A series of magical (unexplained, unknown) corrections to flux given a particular
type of flux
"""
fluxtype = self.energy_or_number
if flux < 0.:
flux = 0.
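        # Worked example: for electrons with fluxtype 'energy', a flux of 12.
        # exceeds the 10. threshold below and is reset to 0.5 (treated as an
        # unphysical outlier), while a flux of 7. is capped at 5.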
        if self.atype != 'ions':
            # Electron Energy Flux
            if fluxtype == 'energy':
                if flux > 10.:
                    flux = 0.5
                elif flux > 5.:
                    flux = 5.
            # Electron Number Flux
            # (the larger threshold is tested first so that both branches are
            # reachable: zero out extreme outliers, cap moderate ones)
            elif fluxtype == 'number':
                if flux > 2.0e10:
                    flux = 0.
                elif flux > 2.0e9:
                    flux = 1.0e9
        else:
            # Ion Energy Flux
            if fluxtype == 'energy':
                if flux > 4.:
                    flux = 0.25
                elif flux > 2.:
                    flux = 2.
            # Ion Number Flux
            if fluxtype == 'number':
                if flux > 5.0e8:
                    flux = 0.
                elif flux > 1.0e8:
                    flux = 1.0e8
return flux
def get_gridded_flux(self, dF, combined_N_and_S=False, interp_N=True):
"""
        Return the flux interpolated onto arbitrary locations
        in mlats and mlts
        combined_N_and_S, bool, optional
            Average the fluxes for the northern and southern hemispheres
            and use the average for both (standard Ovation Prime appears
            to always do this)
            The original code says that this result is appropriate for
            the northern hemisphere, and to use 365 - actual doy to
            get a combined result appropriate for the southern hemisphere
        interp_N, bool, optional
            Interpolate flux linearly for each latitude ring in the wedge
            of low coverage in the northern hemisphere dawn/midnight region
"""
fluxgridN = np.zeros((self.n_mlat_bins // 2, self.n_mlt_bins))
fluxgridN.fill(np.nan)
# Make grid coordinates
mlatgridN, mltgridN = np.meshgrid(self.mlats[self.n_mlat_bins // 2:], self.mlts, indexing='ij')
fluxgridS = np.zeros((self.n_mlat_bins // 2, self.n_mlt_bins))
fluxgridS.fill(np.nan)
# Make grid coordinates
mlatgridS, mltgridS = np.meshgrid(self.mlats[:self.n_mlat_bins // 2], self.mlts, indexing='ij')
# print(self.mlats[:self.n_mlat_bins//2])
for i_mlt in range(self.n_mlt_bins):
for j_mlat in range(self.n_mlat_bins // 2):
                # The mlat bins are organized like -50:-dlat:-90, 50:dlat:90
fluxgridN[j_mlat, i_mlt] = self.estimate_auroral_flux(dF, i_mlt, self.n_mlat_bins // 2 + j_mlat)
fluxgridS[j_mlat, i_mlt] = self.estimate_auroral_flux(dF, i_mlt, j_mlat)
if interp_N:
fluxgridN, inwedge = self.interp_wedge(mlatgridN, mltgridN, fluxgridN)
self.inwedge = inwedge
if not combined_N_and_S:
return mlatgridN, mltgridN, fluxgridN, mlatgridS, mltgridS, fluxgridS
else:
return mlatgridN, mltgridN, (fluxgridN + fluxgridS) / 2.
def interp_wedge(self, mlatgridN, mltgridN, fluxgridN):
"""
        Interpolates across the wedge-shaped data gap
        around 50 magnetic latitude and 23-4 MLT.
        Interpolation is performed individually
        across each magnetic latitude ring;
        only the missing flux values are filled
        using the interpolant
"""
# Constants copied verbatim from IDL code
x_mlt_min = -1.0 # minimum MLT for interpolation [hours] --change if desired
x_mlt_max = 4.0 # maximum MLT for interpolation [hours] --change if desired
x_mlat_min = 49.0 # minimum MLAT for interpolation [degrees]
# x_mlat_max=67.0
x_mlat_max = 75.0 # maximum MLAT for interpolation [degrees] --change if desired (LMK increased this from 67->75)
valid_interp_mlat_bins = np.logical_and(mlatgridN[:, 0] >= x_mlat_min, mlatgridN[:, 0] <= x_mlat_max).flatten()
inwedge = np.zeros(fluxgridN.shape, dtype=bool) # Store where we did interpolation
for i_mlat_bin in np.flatnonzero(valid_interp_mlat_bins).tolist():
            # Technically any row in the MLT grid would do, but for consistency use the i_mlat_bin-th one
this_mlat = mlatgridN[i_mlat_bin, 0].copy()
this_mlt = mltgridN[i_mlat_bin, :].copy()
this_flux = fluxgridN[i_mlat_bin, :].copy()
            # Change from 0-24 MLT to -12 to 12 MLT, so that there is no discontinuity at midnight
# when we interpolate
this_mlt[this_mlt > 12.] = this_mlt[this_mlt > 12.] - 24.
valid_interp_mlt_bins = np.logical_and(this_mlt >= x_mlt_min, this_mlt <= x_mlt_max).flatten()
mlt_bins_missing_flux = np.logical_not(this_flux > 0.).flatten()
interp_bins_missing_flux = np.logical_and(valid_interp_mlt_bins, mlt_bins_missing_flux)
inwedge[i_mlat_bin, :] = interp_bins_missing_flux
if np.count_nonzero(interp_bins_missing_flux) > 0:
# Bins right next to missing wedge probably have bad statistics, so
# don't include them
interp_bins_missing_flux_inds = np.flatnonzero(interp_bins_missing_flux)
nedge = 6
for edge_offset in range(1, nedge + 1):
lower_edge_ind = interp_bins_missing_flux_inds[0] - edge_offset
upper_edge_ind = np.mod(interp_bins_missing_flux_inds[-1] + edge_offset,
len(interp_bins_missing_flux))
interp_bins_missing_flux[lower_edge_ind] = interp_bins_missing_flux[
interp_bins_missing_flux_inds[0]]
interp_bins_missing_flux[upper_edge_ind] = interp_bins_missing_flux[
interp_bins_missing_flux_inds[-1]]
interp_source_bins = np.flatnonzero(np.logical_not(interp_bins_missing_flux))
# flux_interp = interpolate.PchipInterpolator(this_mlt[interp_source_bins], this_flux[interp_source_bins])
flux_interp = interpolate.interp1d(this_mlt[interp_source_bins], this_flux[interp_source_bins],
kind='linear')
fluxgridN[i_mlat_bin, interp_bins_missing_flux] = flux_interp(this_mlt[interp_bins_missing_flux])
# print fluxgridN[i_mlat_bin,interp_bins_missing_flux]
# print "For latitude %.1f, replaced %d flux bins between MLT %.1f and %.1f with interpolated flux..." % (this_mlat,
# np.count_nonzero(interp_bins_missing_flux),np.nanmin(this_mlt[interp_bins_missing_flux]),
# np.nanmax(this_mlt[interp_bins_missing_flux]))
# notwedge = np.logical_not(inwedge)
# crazy = np.logical_and(inwedge,fluxgridN>np.nanpercentile(fluxgridN[notwedge],90.))
# fluxgridN[crazy]=np.nanpercentile(fluxgridN[notwedge],90.)
return fluxgridN, inwedge
```
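A minimal, hypothetical driver for the estimator methods above. The class name `FluxEstimator` and its constructor keywords are assumptions (the constructor is defined earlier in the source, outside this excerpt); only the `get_gridded_flux` call follows a signature shown here:
```python
# Sketch only: 'FluxEstimator' and its constructor keywords are assumed names.
dF = 2134.0  # example solar wind-magnetosphere coupling strength
estimator = FluxEstimator(atype='diff', energy_or_number='energy')
mlatN, mltN, fluxN, mlatS, mltS, fluxS = estimator.get_gridded_flux(dF)
print(fluxN.shape)  # (n_mlat_bins // 2, n_mlt_bins)
```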
#### File: toolbox/io/dialog.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog as fd
from tkinter.messagebox import showinfo
# create the root window
root = tk.Tk()
root.title('Tkinter Open File Dialog')
root.resizable(False, False)
root.geometry('300x150')
def select_file(**kwargs):
filetypes = (
('text files', '*.txt'),
('All files', '*.*')
)
filename = fd.askopenfilename(
title='Open a file',
initialdir='/',
filetypes=filetypes)
showinfo(
title='Selected File',
message=filename
)
# open button
open_button = ttk.Button(
root,
text='Open a File',
command=select_file
)
open_button.pack(expand=True)
# run the application
root.mainloop()
```
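The same pattern works for tkinter's other standard dialogs; a minimal save-dialog sketch (the file types here are illustrative):
```python
# Sketch: the standard tkinter save-file dialog; filetypes are examples.
from tkinter import filedialog as fd

def save_file():
    filename = fd.asksaveasfilename(
        title='Save as',
        defaultextension='.txt',
        filetypes=(('text files', '*.txt'), ('All files', '*.*')))
    return filename
```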
#### File: toolbox/utilities/pyclass.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import copy
import geospacelab.toolbox.utilities.pylogging as mylog
import geospacelab.toolbox.utilities.pybasic as mybasic
def set_object_attributes(obj, *args, dict_deepcopy=True, **kwargs):
append = kwargs.pop('append', False)
logging = kwargs.pop('logging', True)
append_rec = 0
for ind, arg in enumerate(args):
if ind % 2 == 0:
kwargs[arg] = args[ind+1]
else:
continue
for key, value in kwargs.items():
if not hasattr(obj, key):
if not append:
mylog.StreamLogger.warning("Object %s: '%s' is not found in the named attributes!",
mybasic.retrieve_name(obj), key)
append_rec = 1
continue
else:
if logging:
mylog.simpleinfo.info("Object %s: The attribute '%s' is added!", mybasic.retrieve_name(obj), key)
if type(value) is dict and dict_deepcopy is True:
value = copy.deepcopy(value)
setattr(obj, key, value)
if append_rec:
mylog.simpleinfo.info("Object %s: To add the new attribute, set append=True", mybasic.retrieve_name(obj))
def get_object_attributes(obj):
attrs = {}
for name in vars(obj):
if name.startswith("__"):
continue
if name.startswith("_"):
continue
attr = getattr(obj, name)
if callable(attr):
continue
attrs[name] = attr
return attrs
class StrBase(str):
"""
Base class inherits from *str*. Useful when adding additional attributes to a string.
"""
def __new__(cls, str_in):
if issubclass(str_in.__class__, StrBase):
obj = str_in
else:
obj = str.__new__(cls, str_in)
return obj
def config(self, logging=True, **kwargs):
"""
        A standard method used in this package to set attributes on the string.
        :param logging: If ``True``, show logging.
        :type logging: bool, default: True
        :param kwargs: Other optional keyword arguments of :meth:`~geospacelab.toolbox.utilities.pyclass.set_object_attributes`.
"""
set_object_attributes(self, append=False, logging=logging, **kwargs)
if __name__ == "__main__":
class A(object):
def __init__(self):
self.a = 1
a = A()
b = A()
set_object_attributes(a, 'b', 2, append=True)
print(a.b)
    # print(b.b)  # would raise AttributeError: 'b' was added to instance 'a' only
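    # StrBase demo: a str subclass that can carry extra attributes
    # ('unit' below is a hypothetical attribute, not part of the class).
    s = StrBase('n_e')
    set_object_attributes(s, 'unit', 'm-3', append=True, logging=False)
    print(s, s.unit)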
```
#### File: geospacelab/visualization/__init__.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
from geospacelab.visualization.mpl.ts_viewer import TimeSeriesViewer
from geospacelab.visualization.mpl._base import plt, FigureBase
def mpl_viewer(figure_class=FigureBase, **kwargs):
"""
Create a viewer based on **matplotlib**.
    :param figure_class: Optionally use a custom :class:`GeospaceLab Canvas <geospacelab.visualization.mpl._base.Canvas>`
        instance.
    :type figure_class: subclass of :class:`GeospaceLab Canvas <geospacelab.visualization.mpl._base.Canvas>`
:param kwargs: Optional keyword arguments as same as in ``plt.figure``
:return: The canvas instance
"""
kwargs.setdefault('FigureClass', figure_class)
fig = plt.figure(**kwargs)
return fig
```
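A short usage sketch for `mpl_viewer`; keyword arguments are forwarded to `plt.figure`:
```python
# Sketch: create a figure using the default FigureBase class.
from geospacelab.visualization import mpl_viewer

fig = mpl_viewer(figsize=(8, 6))
```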
#### File: visualization/mpl/axis_ticks.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import datetime as dt
from matplotlib.ticker import LogFormatterSciNotation
import geospacelab.toolbox.utilities.pydatetime as dttool
import os
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
class DatetimeMajorLocator(mdates.AutoDateLocator):
def __init__(self, tz=None, minticks=4, maxticks=7, interval_multiples=True):
super().__init__(tz=tz, minticks=minticks, maxticks=maxticks, interval_multiples=interval_multiples)
class DatetimeMinorLocator(mdates.AutoDateLocator):
def __init__(self, ax=None, majorlocator=None, tz=None, minticks=12, maxticks=50, interval_multiples=True):
if majorlocator is not None:
tz = majorlocator.tz
# if ax is not None:
# minticks = 2 * len(ax.xaxis.get_major_ticks())
# maxticks = 8 * len(ax.xaxis.get_major_ticks())
super().__init__(tz=tz, minticks=minticks, maxticks=maxticks, interval_multiples=interval_multiples)
class DatetimeMajorFormatter(mdates.AutoDateFormatter):
    def __init__(self, locator, scaled: dict = None, tz=None, defaultfmt='%Y-%m-%d', *, usetex=True):
        super().__init__(locator, tz=tz, defaultfmt=defaultfmt, usetex=usetex)
self.scaled[1 / HOURS_PER_DAY] = formatter_hour_per_day
self.scaled[1 / MINUTES_PER_DAY] = formatter_minute_per_day
if scaled is not None:
self.scaled.update(**scaled)
def formatter_hour_per_day(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
delta = dtx - dt.datetime(dtx.year, dtx.month, dtx.day)
if delta.total_seconds() == 0:
fmt1 = "%b %d"
else:
fmt1 = "%H:%M"
return dtx.strftime(fmt1)
def formatter_minute_per_day(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
delta = dtx - dt.datetime(dtx.year, dtx.month, dtx.day)
if delta.total_seconds() == 0:
fmt1 = "%b %d"
else:
fmt1 = "%H:%M"
return dtx.strftime(fmt1)
def set_timeline(dt_start, dt_stop, **kwargs):
mdlocators = {
1: 'mdates.MicrosecondLocator',
2: 'mdates.SecondLocator',
3: 'mdates.MinuteLocator',
4: 'mdates.HourLocator',
5: 'mdates.DayLocator',
6: 'mdates.MonthLocator',
7: 'mdates.YearLocator',
}
default_second = {
'range': [1, 2, 3, 5, 10, 15, 20, 30, 40, 60],
'majorscale': [10/60, 15/60, 30/60, 1, 2, 3, 4, 5, 5, 10],
'minorscale': [2/60, 2/60, 5/60, 10/60, 20/60, 30/60, 1, 1, 1, 2],
'scale': 1000
}
default_minute = {
'range': [1, 2, 3, 5, 8, 15, 20, 30, 40, 60],
'majorscale': [15/60, 20/60, 30/60, 1, 2, 3, 4, 5, 10],
'minorscale': [3/60, 5/60, 5/60, 10/60, 20/60, 30/60, 1, 1, 2],
'scale': 60
}
default_hour = {
'range': [1, 2, 3, 5, 8, 12, 18, 24],
'majorscale': [15/60, 20/60, 30/60, 1, 2, 3, 4],
'minorscale': [3/60, 5/60, 5/60, 10/60, 20/60, 30/60, 1],
'scale': 60
}
default_day = {
'range': [1, 2, 3, 5, 8, 14, 21, 30, 50, 80, 122],
'majorscale': [6/24, 8/24, 12/24, 1, 2, 3, 5, 10, 15],
'minorscale': [1/24, 2/24, 3/24, 4/24, 8/24, 12/24, 1, 2, 3],
'scale': 24
}
default_month = {
'range': [3, 4, 7, 12],
'majorscale': [1/2, 1, 2],
'minorscale': [1/6, 1/3, 1/2],
'scale': 30
}
default_year = {
'range': [1, 2, 3, 5, 10, 15, 20, 30, 50, 100, 200, 400, 800],
'majorscale': [3/12, 4/12, 6/12, 1, 2, 3, 5, 5, 10, 20, 50, 100],
'minorscale': [1/12, 1/12, 2/12, 3/12, 4/12, 6/12, 1, 1, 2, 5, 10, 20],
'scale': 12
}
    if 'my_second' in kwargs.keys():
        default_second.update(kwargs['my_second'])
    if 'my_minute' in kwargs.keys():
        default_minute.update(kwargs['my_minute'])
    if 'my_hour' in kwargs.keys():
        default_hour.update(kwargs['my_hour'])
    if 'my_day' in kwargs.keys():
        default_day.update(kwargs['my_day'])
    if 'my_month' in kwargs.keys():
        default_month.update(kwargs['my_month'])
    if 'my_year' in kwargs.keys():
        default_year.update(kwargs['my_year'])
default_settings = {
1: {},
2: default_second,
3: default_minute,
4: default_hour,
5: default_day,
6: default_month,
7: default_year
}
tdelta = dt_stop - dt_start
diff = tdelta.total_seconds()
for ind in range(2, 8):
range_ = default_settings[ind]['range']
if (diff >= range_[0]) and (diff < range_[-1]):
break
else:
if ind == 2:
diff = diff/60
            elif ind == 3:
diff = diff/60
elif ind == 4:
diff = diff/24
elif ind == 5:
diff = dttool.get_diff_months(dt_start, dt_stop)
elif ind == 6:
diff = dt_stop.year - dt_start.year
setting = default_settings[ind]
range_ = setting['range']
for range_ind, value in enumerate(range_):
if diff < value:
break
range_ind = range_ind - 1
majorscale = setting['majorscale'][range_ind]
minorscale = setting['minorscale'][range_ind]
if majorscale < range_[0]:
majorlocatorclass = eval(mdlocators[ind - 1])
majorscale = majorscale * setting['scale']
else:
majorlocatorclass = eval(mdlocators[ind])
if minorscale < range_[0]:
minorlocatorclass = eval(mdlocators[ind - 1])
minorscale = minorscale * setting['scale']
else:
minorlocatorclass = eval(mdlocators[ind])
majorscale = int(majorscale)
minorscale = int(minorscale)
# for microseconds,
if majorlocatorclass is mdates.MicrosecondLocator:
interval = majorscale
majorlocator = majorlocatorclass(interval=majorscale)
if dt_start.minute != dt_stop.minute:
fmt = "%M:%S.%f"
else:
fmt = "%S.%f"
def formatter_microsecond(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
            delta = dtx - dt.datetime(dtx.year, dtx.month, dtx.day, dtx.hour, dtx.minute, 0)
if delta.total_seconds() == 0:
fmt1 = "%M:%S.%f"
else:
fmt1 = "%S.%f"
return dtx.strftime(fmt1)
func_formatter = mpl.ticker.FuncFormatter(formatter_microsecond)
if minorlocatorclass is mdates.MicrosecondLocator:
interval = minorscale
minorlocator = minorlocatorclass(interval=minorscale)
# for second
if majorlocatorclass is mdates.SecondLocator:
by1 = range(0, 60, majorscale)
majorlocator = majorlocatorclass(bysecond=by1, interval=1)
if dt_start.hour != dt_stop.hour:
fmt = "%H:%M:%S"
else:
fmt = "%M:%S"
def formatter_second(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
delta = dtx - dt.datetime(dtx.year, dtx.month, dtx.day, dtx.hour, 0, 0)
if delta.total_seconds() == 0:
fmt1 = "%H:%M:%S"
else:
fmt1 = "%M:%S"
return dtx.strftime(fmt1)
func_formatter = mpl.ticker.FuncFormatter(formatter_second)
if minorlocatorclass is mdates.SecondLocator:
by1 = range(0, 60, minorscale)
minorlocator = minorlocatorclass(bysecond=by1, interval=1)
# for minute
if majorlocatorclass is mdates.MinuteLocator:
by1 = range(0, 60, majorscale)
majorlocator = majorlocatorclass(byminute=by1, interval=1)
if dt_start.day != dt_stop.day:
fmt = "%d %H:%M"
else:
fmt = "%H:%M"
def formatter_minute(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
delta = dtx - dt.datetime(dtx.year, dtx.month, dtx.day, 0, 0, 0)
if delta.total_seconds() == 0:
fmt1 = "%b %d"
else:
fmt1 = "%H:%M"
return dtx.strftime(fmt1)
func_formatter = mpl.ticker.FuncFormatter(formatter_minute)
if minorlocatorclass is mdates.MinuteLocator:
by1 = range(0, 60, minorscale)
minorlocator = minorlocatorclass(byminute=by1, interval=1)
# for hour
if majorlocatorclass is mdates.HourLocator:
by1 = range(0, 24, majorscale)
majorlocator = majorlocatorclass(byhour=by1, interval=1)
if dt_start.month != dt_stop.month:
fmt = "%d/%m %H"
elif dt_start.day != dt_stop.day:
fmt = "%d %H:%M"
else:
fmt = "%H:%M"
def formatter_hour(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
delta = dtx - dt.datetime(dtx.year, dtx.month, dtx.day)
if delta.total_seconds() == 0:
fmt1 = "%b %d"
else:
fmt1 = "%H:%M"
return dtx.strftime(fmt1)
func_formatter = mpl.ticker.FuncFormatter(formatter_hour)
if minorlocatorclass is mdates.HourLocator:
by1 = range(0, 24, minorscale)
minorlocator = minorlocatorclass(byhour=by1, interval=1)
# for day
if majorlocatorclass is mdates.DayLocator:
temp = int(np.floor(31.5/majorscale))
by1 = range(1, temp*majorscale, majorscale)
majorlocator = majorlocatorclass(bymonthday=by1, interval=1)
if dt_start.year != dt_stop.year:
fmt = "%Y-%m-%d"
else:
fmt = "%b %d"
def formatter_day(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
delta = dtx - dt.datetime(dtx.year, 1, 1)
if delta.total_seconds() == 0:
fmt1 = "%Y-%m-%d"
else:
fmt1 = "%m-%d"
return dtx.strftime(fmt1)
func_formatter = mpl.ticker.FuncFormatter(formatter_day)
if minorlocatorclass is mdates.DayLocator:
temp = int(np.floor(31.5 / minorscale))
by1 = range(1, temp * minorscale, minorscale)
minorlocator = minorlocatorclass(bymonthday=by1, interval=1)
# for month
if majorlocatorclass is mdates.MonthLocator:
by1 = range(1, 13, majorscale)
majorlocator = majorlocatorclass(bymonth=by1, interval=1)
fmt = "%Y-%m"
def formatter_month(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
delta = dtx - dt.datetime(dtx.year, 1, 1)
if delta.total_seconds() == 0:
fmt1 = "%Y-%m-%d"
else:
fmt1 = "%m-%d"
return dtx.strftime(fmt1)
func_formatter = mpl.ticker.FuncFormatter(formatter_month)
if minorlocatorclass is mdates.MonthLocator:
by1 = range(1, 13, minorscale)
minorlocator = minorlocatorclass(bymonth=by1, interval=1)
# for year
if majorlocatorclass is mdates.YearLocator:
majorlocator = majorlocatorclass(base=majorscale)
def formatter_year(x, pos):
dtx = mpl.dates.num2date(x)
dtx = dtx.replace(tzinfo=None)
fmt1 = "%Y"
return dtx.strftime(fmt1)
func_formatter = mpl.ticker.FuncFormatter(formatter_year)
if minorlocatorclass is mdates.YearLocator:
minorlocator = minorlocatorclass(base=minorscale)
    majorformatter = func_formatter  # the FuncFormatter built above supersedes a plain mdates.DateFormatter(fmt)
return majorlocator, minorlocator, majorformatter
# def timeline_format_function(x, pos=None):
# x = mpl.dates.num2date(x)
# if pos == 0:
# # fmt = '%D %H:%M:%S.%f'
# fmt = '%H:%M'
# else:
# fmt = '%H:%M'
# label = x.strftime(fmt)
# #label = label.rstrip("0")
# #label = label.rstrip(".")
# return label
#
# def set_ticks_datetime(fig, ax, axis='x', locator=None, formatter=None, interval=None, visable='on'):
# xlim = getattr(ax, 'get_' + axis + 'lim')()
# dxlim = np.diff(xlim) * 86400.
# dsec = dxlim
# dmin = dsec / 60.
# dhour = dsec / 60.
# dday = dhour / 24.
# dmonth = dday / 30.
# dyear = dday / 365.
# if locator is None:
# pass
# locObj = getattr(mdates, locator)
# majorlocator = mdates.MinuteLocator(interval=1)
# formatter = mdates.AutoDateFormatter(dtlocator)
# formatter.scaled[1 / (24. * 60.)] = matplotlib.ticker.FuncFormatter(my_format_function)
# if iax < nax - 1:
# formatter = matplotlib.ticker.NullFormatter()
# ax.xaxis.set_major_locator(dtlocator)
# ax.xaxis.set_major_formatter(formatter)
# ax.xaxis.set_minor_locator(mdates.SecondLocator(interval=10))
# ax.xaxis.set_tick_params(labelsize='small')
# class CustomTicker(LogFormatterSciNotation):
# # https://stackoverflow.com/questions/43923966/logformatter-tickmarks-scientific-format-limits
# def __call__(self, x, pos=None):
# if x not in [0.1,1,10]:
# return LogFormatterSciNotation.__call__(self,x, pos=None)
# else:
# return "{x:g}".format(x=x)
# fig = plt.figure(figsize=[7,7])
# ax = fig.add_subplot(111)
# ax.set_yscale('log')
# ax.set_xscale('log')
# ax.plot(np.logspace(-4,4), np.logspace(-4,4))
#
# ax.xaxis.set_major_formatter(CustomTicker())
# plt.show()
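# A minimal usage sketch (not part of the original module): apply the
# locators/formatter returned by set_timeline to a matplotlib axis.
if __name__ == "__main__":
    fig, ax = plt.subplots()
    t0 = dt.datetime(2016, 3, 15, 0, 0)
    t1 = dt.datetime(2016, 3, 15, 6, 0)
    ax.plot([t0, t1], [0, 1])
    major_loc, minor_loc, major_fmt = set_timeline(t0, t1)
    ax.xaxis.set_major_locator(major_loc)
    ax.xaxis.set_minor_locator(minor_loc)
    ax.xaxis.set_major_formatter(major_fmt)
    plt.show()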
```
#### File: visualization/mpl/panels.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import numpy as np
import re
import datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mpl_colors
import matplotlib.ticker as mpl_ticker
import matplotlib.dates as mpl_dates
from scipy.interpolate import interp1d
from geospacelab.visualization.mpl._base import PanelBase
from geospacelab.datahub._base_variable import VariableModel
import geospacelab.toolbox.utilities.numpyarray as arraytool
import geospacelab.visualization.mpl.axis_ticks as ticktool
import geospacelab.toolbox.utilities.pybasic as basic
import geospacelab.toolbox.utilities.pylogging as mylog
from geospacelab.visualization.mpl._helpers import check_panel_ax
import geospacelab.visualization.mpl.colormaps as mycmap
class Panel(PanelBase):
def __init__(self, *args, figure=None, from_subplot=True, **kwargs):
super(Panel, self).__init__(*args, figure=figure, from_subplot=from_subplot, **kwargs)
class TSPanel(Panel):
_default_plot_config = {
'linestyle': '-',
'linewidth': 1.5,
'marker': '.',
'markersize': 1
}
_default_legend_config = {
'loc': 'upper left',
'bbox_to_anchor': (1.0, 1.0),
'frameon': False,
'fontsize': 'medium'
}
_default_xtick_params = {
'labelsize': plt.rcParams['xtick.labelsize'],
'direction': 'inout',
}
_default_colorbar_offset = 0.1
def __init__(
self, *args,
dt_fr=None, dt_to=None, figure=None, from_subplot=True,
bottom_panel=True, timeline_reverse=False, timeline_multiple_labels=None,
time_gap=True,
**kwargs
):
super(Panel, self).__init__(*args, figure=figure, from_subplot=from_subplot, **kwargs)
self.dt_fr = dt_fr
self.dt_to = dt_to
self._xlim = [dt_fr, dt_to]
self.timeline_reverse = timeline_reverse
self.time_gap = time_gap
if timeline_multiple_labels is None:
timeline_multiple_labels = []
self.timeline_multiple_labels = timeline_multiple_labels
self._var_for_config = None
self.bottom_panel = bottom_panel
def draw(self, plot_layout, ax=None):
self.overlay_from_variables(plot_layout, ax=ax)
def overlay_from_variables(self, layout_in, ax=None, level=0):
if ax is None:
ax = self()
self.sca(ax)
        if level == 0 and not list(layout_in):
            raise TypeError("The plot layout must be a non-empty list!")
        elif level == 0:
            if type(layout_in[0]) is list:
                raise ValueError("The first element in the plot layout cannot be a list!")
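            # A nested list in the layout puts those variables on an
            # additional twin x-axis of the same panel (created below)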
if any(type(i) is list for i in layout_in):
self.axes_overview[ax]['twinx'] = 'on'
if level > 1:
            raise NotImplementedError
iplts = []
for ind, elem in enumerate(layout_in):
if ind == 0 or issubclass(elem.__class__, VariableModel):
var = elem[0] if type(elem) is list else elem
iplts_add = self.overlay_a_variable(var, ax=ax)
elif level == 0 and isinstance(elem, list):
ax_in = self.add_twin_axes(
ax=ax, which='x', location='right', offset_type='outward',
offset=50*(len(self.axes_overview[ax]['twinx_axes']))
)
self.axes_overview[ax_in]['twinx'] = 'self'
ax_in._get_lines.prop_cycler = ax._get_lines.prop_cycler
iplts_add = self.overlay_from_variables(elem, ax=ax_in, level=level+1)
else:
raise NotImplementedError
iplts.extend(iplts_add)
if level == 0:
if self.axes_overview[ax]['legend'] == 'on' and self.axes_overview[ax]['twinx'] == 'off':
self._check_legend(ax)
if self.axes_overview[ax]['twinx'] == 'on':
self._check_twinx(ax)
if self.axes_overview[ax]['colorbar'] == 'on':
self._check_colorbar(ax)
self._set_xaxis(ax=ax)
self._set_yaxis(ax=ax)
return iplts
def overlay_a_variable(self, var, ax=None):
self.axes_overview[ax]['variables'].extend([var])
var_for_config = self.axes_overview[ax]['variables'][0]
plot_style = var_for_config.visual.plot_config.style
if plot_style in ['1P', '1noE']:
iplt = self.overlay_line(var, ax=ax)
elif plot_style in ['1', '1E']:
iplt = self.overlay_line(var, ax=ax, errorbar='on')
elif plot_style in ['2P']:
iplt = self.overlay_pcolormesh(var, ax=ax)
elif plot_style in ['2I']:
iplt = self.overlay_imshow(var, ax=ax)
elif plot_style in ['1B']:
iplt = self.overlay_bar(var, ax=ax)
elif plot_style in ['1F0']:
iplt = self.overlay_fill_between_y_zero(var, ax=ax)
elif plot_style in ['1S']:
iplt = self.overlay_scatter(var, ax=ax)
elif plot_style in ['1C']:
iplt = self.overlay_multiple_colored_line(var, ax=ax)
else:
raise NotImplementedError
return iplt
@check_panel_ax
def overlay_line(self, var, ax=None, errorbar='off', **kwargs):
"""
Overlay a line plot in the axes.
        :param var: A GeospaceLab Variable object.
        :param ax: The axes to plot in.
        :param errorbar: If 'on', show error bars.
        :param kwargs: Other keyword arguments forwarded to ax.plot() or ax.errorbar().
:return:
"""
il = None
data = self._retrieve_data_1d(var)
x = data['x']
y = data['y']
y_err = data['y_err']
plot_config = basic.dict_set_default(kwargs, **var.visual.plot_config.line)
plot_config = basic.dict_set_default(plot_config, **self._default_plot_config)
if self.axes_overview[ax]['twinx'] in ['on', 'self']:
var.visual.axis[1].label = '@v.label'
label = var.get_visual_axis_attr(axis=2, attr_name='label')
plot_config = basic.dict_set_default(plot_config, label=label)
if errorbar == 'off':
            il = ax.plot(x, y, **plot_config)
elif errorbar == 'on':
errorbar_config = dict(plot_config)
errorbar_config.update(var.visual.plot_config.errorbar)
            il = ax.errorbar(x, y, yerr=y_err, **errorbar_config)
if type(il) is not list:
il = [il]
self.axes_overview[ax]['lines'].extend(il)
if self.axes_overview[ax]['legend'] is None:
self.axes_overview[ax]['legend'] = 'on'
return il
@check_panel_ax
def overlay_bar(self, *args, ax=None, **kwargs):
var = args[0]
il = None
data = self._retrieve_data_1d(var)
x = data['x'].flatten()
height = data['y'].flatten()
bar_config = basic.dict_set_default(kwargs, **var.visual.plot_config.bar)
color_by_value = bar_config.pop('color_by_value', False)
colormap = bar_config.pop('colormap', 'viridis')
vmin = bar_config.pop('vmin', np.nanmin(height))
vmax = bar_config.pop('vmax', np.nanmax(height))
if color_by_value:
if isinstance(colormap, str):
colormap = mpl.cm.get_cmap(colormap)
norm = mpl_colors.Normalize(vmin=vmin, vmax=vmax)
colors = colormap(norm(height))
bar_config.update(color=colors)
# plot_config = basic.dict_set_default(plot_config, **self._default_plot_config)
ib = ax.bar(x, height, **bar_config)
return [ib]
@check_panel_ax
def overlay_pcolormesh(self, *args, ax=None, **kwargs):
var = args[0]
data = self._retrieve_data_2d(var)
x = data['x']
y = data['y']
z = data['z']
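        # pcolormesh expects cell *edges*; the coordinates retrieved above are
        # cell *centers*, so shift by half a grid step and pad one extra edge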
if x.shape[0] == z.shape[0]:
delta_x = np.diff(x, axis=0)
x[:-1, :] = x[:-1, :] + delta_x/2
x = np.vstack((
np.array(x[0, 0] - delta_x[0, 0] / 2).reshape((1, 1)),
x[:-1, :],
np.array(x[-1, 0] + delta_x[-1, 0] / 2).reshape((1, 1))
))
if y.shape[1] == z.shape[1]:
delta_y = np.diff(y, axis=1)
y[:, :-1] = y[:, :-1] + delta_y/2
y = np.hstack((
np.array(y[:, 0] - delta_y[:, 0]/2).reshape((y.shape[0], 1)),
y[:, :-1],
np.array(y[:, -1] + delta_y[:, -1]/2).reshape((y.shape[0], 1)),
))
if y.shape[0] == z.shape[0]:
y = np.vstack((y, y[-1, :].reshape((1, y.shape[1]))))
pcolormesh_config = var.visual.plot_config.pcolormesh
z_lim = var.visual.axis[2].lim
if z_lim is None:
z_lim = [np.nanmin(z.flatten()), np.nanmax(z.flatten())]
z_scale = var.visual.axis[2].scale
if z_scale == 'log':
norm = mpl_colors.LogNorm(vmin=z_lim[0], vmax=z_lim[1])
pcolormesh_config.update(norm=norm)
else:
pcolormesh_config.update(vmin=z_lim[0])
pcolormesh_config.update(vmax=z_lim[1])
colormap = mycmap.get_colormap(var.visual.plot_config.pcolormesh.get('cmap', None))
pcolormesh_config.update(cmap=colormap)
im = ax.pcolormesh(x.T, y.T, z.T, **pcolormesh_config)
self.axes_overview[ax]['collections'].extend([im])
if self.axes_overview[ax]['colorbar'] is None:
self.axes_overview[ax]['colorbar'] = 'on'
return [im]
@check_panel_ax
def _get_var_for_config(self, ax=None, ind=0):
var_for_config = self.axes_overview[ax]['variables'][ind]
return var_for_config
def _set_xaxis(self, ax):
if self._xlim[0] is None:
self._xlim[0] = mpl_dates.num2date(ax.get_xlim()[0])
if self._xlim[1] is None:
self._xlim[1] = mpl_dates.num2date(ax.get_xlim()[1])
ax.set_xlim(self._xlim)
# reverse the x axis if timeline_reverse=True
if self.timeline_reverse:
ax.set_xlim((self._xlim[1], self._xlim[0]))
ax.xaxis.set_tick_params(labelsize=self._default_xtick_params['labelsize'])
ax.xaxis.set_tick_params(
which='both',
direction=self._default_xtick_params['direction'],
bottom=True, top=True
)
ax.xaxis.set_tick_params(which='major', length=8)
ax.xaxis.set_tick_params(which='minor', length=4)
# use date locators
# majorlocator, minorlocator, majorformatter = ticktool.set_timeline(self._xlim[0], self._xlim[1])
from geospacelab.visualization.mpl.axis_ticks import DatetimeMajorFormatter, DatetimeMajorLocator, DatetimeMinorLocator
majorlocator = DatetimeMajorLocator()
majorformatter = DatetimeMajorFormatter(majorlocator)
if not self.bottom_panel:
majorformatter = mpl_ticker.NullFormatter()
plt.setp(ax.get_xticklabels(), visible=False)
ax.xaxis.set_major_locator(majorlocator)
ax.xaxis.set_major_formatter(majorformatter)
# minor locator must be set up after setting the major locator
minorlocator = DatetimeMinorLocator(ax=ax, majorlocator=majorlocator)
ax.xaxis.set_minor_locator(minorlocator)
if self.bottom_panel:
self._set_xaxis_ticklabels(ax, majorformatter=majorformatter)
def _set_xaxis_ticklabels(self, ax, majorformatter=None):
var_for_config = self._get_var_for_config(ax=ax)
# ax.tick_params(axis='x', labelsize=plt.rcParams['xtick.labelsize'])
# set UT timeline
if not list(self.timeline_multiple_labels):
ax.set_xlabel('UT', fontsize=12, fontweight='normal')
return
figlength = self.figure.get_size_inches()[1]*2.54
if figlength > 20:
yoffset = 0.02
else:
yoffset = 0.04
ticks = ax.get_xticks()
ylim0, _ = ax.get_ylim()
xy_fig = []
# transform from data cs to figure cs
for tick in ticks:
px = ax.transData.transform([tick, ylim0])
xy = self.figure.transFigure.inverted().transform(px)
xy_fig.append(xy)
xlabels = ['UT']
x_depend = var_for_config.get_depend(axis=0, retrieve_data=True)
x0 = np.array(mpl_dates.date2num(x_depend['UT'])).flatten()
x1 = np.array(ticks)
ys = [x1] # list of tick labels
for ind, label in enumerate(self.timeline_multiple_labels[1:]):
if label in x_depend.keys():
y0 = x_depend[label].flatten()
elif label in var_for_config.dataset.keys():
y0 = var_for_config.dataset[label].value
else:
raise KeyError
if label == 'MLT':
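                # Interpolate MLT via its sin/cos components to avoid the
                # 24 -> 0 hour wrap-around, then recover the hour angle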
y0_sin = np.sin(y0 / 24. * 2 * np.pi)
y0_cos = np.cos(y0 / 24. * 2 * np.pi)
itpf_sin = interp1d(x0, y0_sin, bounds_error=False, fill_value='extrapolate')
itpf_cos = interp1d(x0, y0_cos, bounds_error=False, fill_value="extrapolate")
y0_sin_i = itpf_sin(x1)
y0_cos_i = itpf_cos(x1)
rad = np.sign(y0_sin_i) * (np.pi / 2 - np.arcsin(y0_cos_i))
rad = np.where((rad >= 0), rad, rad + 2 * np.pi)
y1 = rad / 2. / np.pi * 24.
else:
itpf = interp1d(x0, y0, bounds_error=False, fill_value='extrapolate')
y1 = itpf(x1)
ys.append(y1)
xlabels.append(label)
ax.xaxis.set_major_formatter(mpl_ticker.NullFormatter())
# if self.major_timeline == 'MLT':
# for ind_pos, xtick in enumerate(ys[mlt_ind]):
# if xtick == np.nan:
# continue
# text = (datetime.datetime(1970, 1, 1) + datetime.timedelta(hours=xtick)).strftime('%H:%M')
# plt.text(
# xy_fig[ind_pos][0], xy_fig[ind_pos][1] - yoffset * ind - 0.15,
# text,
# fontsize=9,
# horizontalalignment='center', verticalalignment='top',
# transform=self.figure.transFigure
# )
# ax.set_xlabel('MLT')
# return
for ind, xticks in enumerate(ys):
ax.text(
0.1, xy_fig[0][1] - yoffset * ind - 0.013,
xlabels[ind],
fontsize=plt.rcParams['xtick.labelsize'], fontweight='normal',
horizontalalignment='right', verticalalignment='top',
transform=self.figure.transFigure
)
for ind_pos, xtick in enumerate(xticks):
if np.isnan(xtick):
continue
if ind == 0:
text = majorformatter.format_data(xtick)
elif xlabels[ind] == 'MLT':
text = (datetime.datetime(1970, 1, 1) + datetime.timedelta(hours=xtick)).strftime('%H:%M')
else:
text = '%.1f' % xtick
ax.text(
xy_fig[ind_pos][0], xy_fig[ind_pos][1] - yoffset * ind - 0.013,
text,
fontsize=plt.rcParams['xtick.labelsize'],
horizontalalignment='center', verticalalignment='top',
transform=self.figure.transFigure
)
@check_panel_ax
def _set_yaxis(self, ax=None):
var_for_config = self._get_var_for_config(ax=ax)
ax.tick_params(axis='y', which='major', labelsize=plt.rcParams['ytick.labelsize'])
# Set y axis lim
self._set_ylim(ax=ax)
# set y labels and alignment two methods: fig.align_ylabels(axs[:, 1]) or yaxis.set_label_coords
ylabel = var_for_config.get_visual_axis_attr('label', axis=1)
yunit = var_for_config.get_visual_axis_attr('unit', axis=1)
ylabel_style = var_for_config.get_visual_axis_attr('label_style', axis=1)
if self.axes_overview[ax]['twinx'] == 'self':
ylabel = self.generate_label(ylabel, unit=yunit, style='single')
ax.set_ylabel(ylabel, va='baseline', rotation=270, fontsize=plt.rcParams['axes.labelsize'])
else:
ylabel = self.generate_label(ylabel, unit=yunit, style=ylabel_style)
label_pos = var_for_config.visual.axis[1].label_pos
ax.set_ylabel(ylabel, va='bottom', fontsize=plt.rcParams['axes.labelsize'])
if label_pos is not None:
ax.yaxis.set_label_coords(label_pos[0], label_pos[1])
ylim = ax.get_ylim()
# set yaxis scale
yscale = var_for_config.visual.axis[1].scale
ax.set_yscale(yscale)
# set yticks and yticklabels, usually do not change the matplotlib default
yticks = var_for_config.visual.axis[1].ticks
yticklabels = var_for_config.visual.axis[1].tick_labels
if yticks is not None:
ax.set_yticks(yticks)
if yticklabels is not None:
ax.set_yticklabels(yticklabels)
else:
if yscale == 'linear':
major_max = var_for_config.visual.axis[1].major_tick_max
minor_max = var_for_config.visual.axis[1].minor_tick_max
if major_max is None:
ax.yaxis.set_minor_locator(mpl_ticker.AutoLocator())
else:
ax.yaxis.set_major_locator(mpl_ticker.MaxNLocator(major_max))
if minor_max is None:
ax.yaxis.set_minor_locator(mpl_ticker.AutoMinorLocator())
else:
ax.yaxis.set_minor_locator(mpl_ticker.MaxNLocator(minor_max))
if yscale == 'log':
locmin = mpl_ticker.LogLocator(base=10.0,
subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
numticks=12
)
ax.yaxis.set_minor_locator(locmin)
# ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ax.set_ylim(ylim)
@check_panel_ax
def _set_ylim(self, ax=None, zero_line='on'):
var_for_config = self.axes_overview[ax]['variables'][0]
ylim = var_for_config.visual.axis[1].lim
ylim_current = ax.get_ylim()
if ylim is None:
ylim = ylim_current
elif ylim[0] == -np.inf and ylim[1] == np.inf:
maxlim = np.max(np.abs(ylim_current))
ylim = [-maxlim, maxlim]
else:
if ylim[0] is None:
ylim[0] = ylim_current[0]
if ylim[1] is None:
ylim[1] = ylim_current[1]
if zero_line == 'on' and self.axes_overview[ax]['twinx']=='off':
if (ylim[0] < 0) and (ylim[1] > 0):
ax.plot(ax.get_xlim(), [0, 0], 'k--', linewidth=0.5)
ax.set_ylim(ylim)
return
def _check_legend(self, ax):
ax_ov = self.axes_overview[ax]
var_for_config = ax_ov['variables'][0]
legend_config = var_for_config.visual.plot_config.legend
# get color
legend_config = basic.dict_set_default(legend_config, **self._default_legend_config)
        if len(ax_ov['lines']) == 1 and ax_ov['lines'][0].get_label() == '_line0':
return
ax.legend(handles=ax_ov['lines'], **legend_config)
def _check_twinx(self, ax):
ax_ov = self.axes_overview[ax]
axes = [ax]
axes.extend(ax_ov['twinx_axes'])
for ind, pax in enumerate(axes):
pax_ov = self.axes_overview[pax]
if list(pax_ov['lines']):
il = pax_ov['lines'][0]
pax.yaxis.label.set_color(il.get_color())
pax.tick_params(axis='y', which='both', colors=il.get_color())
pax.spines['right'].set_edgecolor(il.get_color())
else:
continue
def _check_colorbar(self, ax):
ax_ov = self.axes_overview[ax]
var_for_config = ax_ov['variables'][0]
colorbar_config = var_for_config.visual.plot_config.colorbar
c_scale = var_for_config.visual.axis[2].scale
c_label = var_for_config.get_visual_axis_attr('label', axis=2)
c_unit = var_for_config.get_visual_axis_attr('unit', axis=2)
c_label_style = var_for_config.get_visual_axis_attr('label_style', axis=2)
c_label = self.generate_label(c_label, unit=c_unit, style=c_label_style)
c_ticks = var_for_config.visual.axis[2].ticks
c_tick_labels = var_for_config.visual.axis[2].tick_labels
im = ax_ov['collections'][0]
offset = self._default_colorbar_offset
ntwinx = len(ax_ov['twinx_axes'])
cax_position = [1.02 + offset * ntwinx, 0.01, 0.025, 0.85]
colorbar_config.update(
cax_scale=c_scale, cax_label=c_label, cax_ticks=c_ticks, cax_tick_labels=c_tick_labels,
cax_position=cax_position
)
self.add_colorbar(im, cax='new', **colorbar_config)
def _retrieve_data_1d(self, var):
x_data = var.get_visual_axis_attr(axis=0, attr_name='data')
if type(x_data) == list:
x_data = x_data[0]
if x_data is None:
depend_0 = var.get_depend(axis=0)
x_data = depend_0['UT'] # numpy array, type=datetime
if x_data is None:
x_data = np.array([0, 1]).reshape(2, 1)
var.visual.axis[0].mask_gap = False
y_data = var.get_visual_axis_attr(axis=1, attr_name='data')
if y_data is None:
y_data = var.value
if y_data is None:
y_data = np.empty_like(x_data, dtype=np.float32)
y_data[::] = np.nan
y_err_data = var.get_visual_axis_attr(axis=1, attr_name='data_err')
if y_err_data is None:
y_err_data = var.error
x_data_res = var.visual.axis[0].data_res
x = x_data
y = y_data * var.visual.axis[1].data_scale
if y_err_data is None:
y_err = np.empty_like(y)
y_err[:] = 0
else:
y_err = y_err_data * var.visual.axis[1].data_scale
# resample time if needed
x_data = x
y_data = y
y_err_data = y_err
time_gap = var.visual.axis[0].mask_gap
if time_gap is None:
time_gap = self.time_gap
if time_gap:
x, y = arraytool.data_resample(
x_data, y_data, xtype='datetime', xres=x_data_res, method='Null', axis=0)
x, y_err = arraytool.data_resample(
x_data, y_err_data, xtype='datetime', xres=x_data_res, method='Null', axis=0)
data = {'x': x, 'y': y, 'y_err': y_err}
return data
def _retrieve_data_2d(self, var):
x_data = var.get_visual_axis_attr(axis=0, attr_name='data')
if type(x_data) == list:
x_data = x_data[0]
if x_data is None:
depend_0 = var.get_depend(axis=0)
x_data = depend_0['UT'] # numpy array, type=datetime
if x_data is None:
x_data = np.array([self._xlim[0], self._xlim[1]]).reshape(2, 1)
var.visual.axis[0].mask_gap = False
y_data = var.get_visual_axis_attr(axis=1, attr_name='data')
if type(y_data) == list:
y_data = y_data[0]
if y_data is None:
y_data = var.get_depend(axis=1, retrieve_data=True)
y_data_keys = list(y_data.keys())
y_data = y_data[y_data_keys[0]]
if y_data is None:
y_data = np.array([0, 1]).reshape(1, 2)
y_data = y_data * var.visual.axis[1].data_scale
z_data = var.get_visual_axis_attr(axis=2, attr_name='data')
if type(z_data) == list:
z_data = z_data[0]
if z_data is None:
z_data = var.value
if z_data is None:
z_data = np.array([[np.nan, np.nan], [np.nan, np.nan]])
if (x_data is None) or (z_data is None):
raise ValueError
z_data = z_data * var.visual.axis[2].data_scale
x_data_res = var.visual.axis[0].data_res
time_gap = var.visual.axis[0].mask_gap
if time_gap is None:
time_gap = self.time_gap
if time_gap:
x, y, z = arraytool.data_resample_2d(
x=x_data, y=y_data, z=z_data, xtype='datetime', xres=x_data_res, method='Null', axis=0)
else:
x = x_data
y = y_data
z = z_data
data = {'x': x, 'y': y, 'z': z}
return data
@staticmethod
    def generate_label(label: str, unit: str = '', style: str = 'double'):
        # e.g. generate_label('n_e', 'm-3', style='double') -> 'n_e\n(m-3)'
if str(unit):
if style == 'single':
label = label + " " + '(' + unit + ')'
elif style == 'double':
label = label + "\n" + '(' + unit + ')'
else:
raise NotImplementedError
return label
def check_panel_ax(func):
    # Note: this local definition shadows the check_panel_ax imported from
    # geospacelab.visualization.mpl._helpers; it serves the legacy Panel1 class below.
    def wrapper(*args, **kwargs):
        obj = args[0]  # the Panel1 instance ('self' of the decorated method)
        kwargs.setdefault('ax', None)
        if kwargs['ax'] is None:
            kwargs['ax'] = obj.axes['major']
        result = func(*args, **kwargs)
        return result
    return wrapper
class Panel1(object):
def __init__(self, *args, figure=None, **kwargs):
if figure is None:
figure = plt.gcf()
self.figure = figure
self.axes = {}
self.label = kwargs.pop('label', None)
# self.objectives = kwargs.pop('objectives', {})
ax = self.figure.add_subplot(*args, **kwargs)
self.axes['major'] = ax
# def add_subplot(self, *args, major=False, label=None, **kwargs):
# if major:
# label = 'major'
# else:
# if label is None:
# label = len(self.axes.keys())
#
# ax = self.figure.add_subplot(*args, **kwargs)
# self.axes[label] = ax
# return ax
def add_axes(self, *args, major=False, label=None, **kwargs):
if major:
label = 'major'
else:
if label is None:
label = len(self.axes.keys())
ax = self.figure.add_axes(*args, **kwargs)
ax.patch.set_alpha(0)
self.axes[label] = ax
return ax
@property
def major_ax(self):
return self.axes['major']
@major_ax.setter
def major_ax(self, ax):
self.axes['major'] = ax
@check_panel_ax
def plot(self, *args, ax=None, **kwargs):
# plot_type "1P"
ipl = ax.plot(*args, **kwargs)
return ipl
@check_panel_ax
def errorbar(self, *args, ax=None, **kwargs):
# plot_type = "1E"
ieb = ax.errorbar(*args, **kwargs)
return ieb
@check_panel_ax
def pcolormesh(self, *args, ax=None, **kwargs):
# plot_type: "2P"
ipm = ax.pcolormesh(*args, **kwargs)
return ipm
@check_panel_ax
def imshow(self, *args, ax=None, **kwargs):
# plot_type = "2I"
im = ax.imshow(*args, **kwargs)
return im
def add_label(self, x, y, label, ax=None, ha='left', va='center', **kwargs):
if ax is None:
ax = self.axes['major']
if label is None:
label = ''
transform = kwargs.pop('transform', ax.transAxes)
ax.text(x, y, label, transform=transform, ha=ha, va=va, **kwargs)
def add_title(self, *args, **kwargs):
self.axes['major'].set_title(*args, **kwargs)
def test_tspanel():
from geospacelab.express.eiscat_viewer import EISCATViewer
dt_fr = datetime.datetime.strptime('20211010' + '1700', '%Y%m%d%H%M')
dt_to = datetime.datetime.strptime('20211010' + '2100', '%Y%m%d%H%M')
site = 'UHF'
antenna = 'UHF'
modulation = 'ant'
load_mode = 'AUTO'
data_file_type = 'eiscat-hdf5'
viewer = EISCATViewer(dt_fr, dt_to, site=site, antenna=antenna, modulation=modulation,
data_file_type=data_file_type, load_mode=load_mode, status_control=True,
residual_control=True)
# select beams before assign the variables
# viewer.dataset.select_beams(field_aligned=True)
# viewer.dataset.select_beams(az_el_pairs=[(188.6, 77.7)])
viewer.status_mask()
n_e = viewer.assign_variable('n_e')
T_i = viewer.assign_variable('T_i')
T_e = viewer.assign_variable('T_e')
v_i = viewer.assign_variable('v_i_los')
az = viewer.assign_variable('AZ')
el = viewer.assign_variable('EL')
panel1 = TSPanel(dt_fr=dt_fr, dt_to=dt_to)
panel1.draw([n_e])
plt.show()
if __name__ == '__main__':
test_tspanel()
```
#### File: visualization/plotly/ipanel.py
```python
class Panel(object):
def __init__(self):
pass
def add_line(self):
pass
def add_image(self):
pass
def add_pcolor(self):
pass
def add_scatter(self):
pass
```
#### File: GeoSpaceLab/test/test_superdarn.py
```python
import datetime
import matplotlib.pyplot as plt
import numpy as np
import geospacelab.visualization.mpl.geomap.geodashboards as geomap
def test_ampere():
dt_fr = datetime.datetime(2016, 3, 15, 0)
dt_to = datetime.datetime(2016, 3, 15, 23, 59)
time1 = datetime.datetime(2016, 3, 15, 1, 10)
pole = 'N'
load_mode = 'assigned'
# specify the file full path
data_file_paths = ['/home/lei/afys-data/SuperDARN/PotentialMap/2016/test.dat']
# data_file_paths = ['/Users/lcai/Geospacelab/Data/SuperDARN/POTMAP/2016/SuperDARM_POTMAP_20160314_10min_test.txt']
viewer = geomap.GeoDashboard(dt_fr=dt_fr, dt_to=dt_to, figure_config={'figsize': (8, 8)})
viewer.dock(datasource_contents=['superdarn', 'potmap'], load_mode=load_mode, data_file_paths=data_file_paths)
viewer.set_layout(1, 1)
dataset_superdarn = viewer.datasets[1]
phi = viewer.assign_variable('GRID_phi', dataset_index=1)
dts = viewer.assign_variable('DATETIME', dataset_index=1).value.flatten()
mlat = viewer.assign_variable('GRID_MLAT', dataset_index=1)
mlon = viewer.assign_variable('GRID_MLON', dataset_index=1)
mlt = viewer.assign_variable(('GRID_MLT'), dataset_index=1)
ind_t = dataset_superdarn.get_time_ind(ut=time1)
# initialize the polar map
pid = viewer.add_polar_map(row_ind=0, col_ind=0, style='mlt-fixed', cs='AACGM', mlt_c=0., pole=pole, ut=time1, boundary_lat=50, mirror_south=True)
panel1 = viewer.panels[pid]
panel1.add_coastlines()
phi_ = phi.value[ind_t]
mlat_ = mlat.value[ind_t]
mlt_ = mlt.value[ind_t]
mlon_ = mlon.value[ind_t]
# grid_mlat, grid_mlt, grid_phi = dataset_superdarn.grid_phi(mlat_, mlt_, phi_, interp_method='cubic')
grid_mlat, grid_mlt, grid_phi = dataset_superdarn.postprocess_roll(mlat_, mlt_, phi_)
# re-grid the original data with higher spatial resolution, default mlt_res = 0.05, mlat_res = 0.5. used for plotting.
# grid_mlat, grid_mlt, grid_fac = dataset_ampere.grid_fac(phi_, mlt_res=0.05, mlat_res=0.05, interp_method='linear')
levels = np.array([-21e3, -18e3, -15e3, -12e3, -9e3, -6e3, 3e3, 6e3, 9e3, 12e3, 15e3, 18e3, 21e3])
# ipc = panel1.add_pcolor(fac_, coords={'lat': mlat[ind_t, ::], 'lon': None, 'mlt': mlt[ind_t, ::], 'height': 250.}, cs='AACGM', **pcolormesh_config)
ict = panel1.add_contour(grid_phi, coords={'lat': grid_mlat, 'lon': None, 'mlt': grid_mlt}, cs='AACGM', colors='b', levels=levels)
# panel1.major_ax.clabel(ict, inline=True, fontsize=10)
panel1.add_gridlines(lat_res=5, lon_label_separator=5)
polestr = 'North' if pole == 'N' else 'South'
# panel1.add_title('DMSP/SSUSI, ' + band + ', ' + sat_id.upper() + ', ' + polestr + ', ' + time1.strftime('%Y-%m-%d %H%M UT'), pad=20)
plt.savefig('superdarn_example', dpi=300)
plt.show()
if __name__ == "__main__":
test_ampere()
```
#### File: GeoSpaceLab/test/test_swarm.py
```python
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from loaders.load_uta_gitm_201602_newrun import *
import utilities.datetime_utilities as du
import visualization.time_series as ts
from geospacelab.visualization.mpl.geomap.geopanels import PolarMapPanel as PolarMap
import geospacelab.visualization.mpl.colormaps as cm
def get_swarm_data(dt_fr, dt_to, satID="C"):
dt_range = [dt_fr, dt_to]
instr_info1 = {'name': 'SWARM-' + satID, 'assign_key': 'SWARM_ACC'}
database = 'uta'
paralist = [
{'database': database, 'instrument': instr_info1, 'paraname': 'N_n_SC'},
{'database': database, 'instrument': instr_info1, 'paraname': 'N_n_s450'},
{'database': database, 'instrument': instr_info1, 'paraname': 'N_n_s500'},
]
paras_layout = [[1, 2]]
tsObj = ts.TimeSeries(dt_range=dt_range, paralist=paralist, paras_layout=paras_layout, timeline='multiple')
sc_lat = tsObj.dataObjs['uta_swarm_c_acc'].paras['SC_GEO_LAT']
sc_lon = tsObj.dataObjs['uta_swarm_c_acc'].paras['SC_GEO_LON']
sc_lst = tsObj.dataObjs['uta_swarm_c_acc'].paras['SC_GEO_ST']
sc_datetime = tsObj.dataObjs['uta_swarm_c_acc'].paras['SC_DATETIME']
rho_n_sc = tsObj.dataObjs['uta_swarm_c_acc'].paras['N_n_SC']
swarm_data = {
'sc_lat': sc_lat,
'sc_lon': sc_lon,
'sc_lst': sc_lst,
'sc_datetime': sc_datetime,
'rho_n_sc': rho_n_sc
}
return swarm_data
def show_rho_n(dt_fr, dt_to):
swarm_data = get_swarm_data(dt_fr, dt_to)
sc_lon = swarm_data['sc_lon'].flatten()
sc_lat = swarm_data['sc_lat'].flatten()
sc_dt = swarm_data['sc_datetime'].flatten()
rho_n_sc = swarm_data['rho_n_sc'].flatten()
plt.figure(figsize=(8,8))
# cs = 'GEO'
# panel = PolarView(cs='GEO', sytle='lst-fixed', pole='N', lon_c=None, lst_c=0, mlt_c=None, ut=dt_fr, boundary_lat=30., proj_style='Stereographic')
cs = 'AACGM'
panel = PolarMap(cs=cs, style='mlt-fixed', pole='N', lon_c=None, lst_c=None, mlt_c=0, ut=dt_fr, boundary_lat=30., proj_style='Stereographic')
panel.add_subplot(major=True)
panel.set_extent(boundary_style='circle')
data = panel.projection.transform_points(ccrs.PlateCarree(), sc_lon, sc_lat)
x = data[:, 0]
y = data[:, 1]
from scipy.stats import linregress
coef = linregress(x, y)
a = coef.slope
b = coef.intercept
# x1 = np.linspace(np.nanmin(x), np.nanmax(x), num=500)
# y1 = np.linspace(np.nanmin(y), np.nanmax(y), num=500)
# x2d, y2d = np.meshgrid(x, y)
# z2d = griddata(data[:, 0:2], rho_n_sc.flatten(), (x2d, y2d), method='nearest')
# z2d_dist = np.abs(a*x2d - y2d + b) / np.sqrt(a**2 + 1)
# z2d = np.where(z2d_dist>1000, np.nan, z2d)
#
# im = panel.major_ax.pcolormesh(x2d, y2d, z2d, vmin=2e-14, vmax=20e-13, cmap='gist_ncar')
# panel.major_ax.plot(sc_lon, sc_lat, transform=ccrs.Geodetic(), linewidth=0.5, color='k')
# plt.colorbar(im)
# xx = np.tile(x, [3, 1])
#
# yy = np.concatenate((y[np.newaxis, :]-150000, y[np.newaxis, :], y[np.newaxis, :]+150000), axis=0)
# zz = np.concatenate((rho_n_sc.T, rho_n_sc.T, rho_n_sc.T), axis=0)
#
# zz = griddata(data[:, 0:2], rho_n_sc.flatten(), (xx, yy), method='nearest')
# im = panel.major_ax.pcolormesh(xx, yy, zz, vmin=2e-14, vmax=20e-13, cmap='gist_ncar')
#
# # panel.major_ax.plot(sc_lon, sc_lat, transform=ccrs.Geodetic(), linewidth=0.5, color='k')
# plt.colorbar(im)
#panel.major_ax.plot(data[:,0], data[:,1], linewidth=3)
from matplotlib.collections import LineCollection
coords = {'lat': sc_lat, 'lon': sc_lon, 'height': 250.}
cs_new = panel.cs_transform(cs_fr='GEO', cs_to=cs, coords=coords)
data = panel.projection.transform_points(ccrs.PlateCarree(), cs_new['lon'], cs_new['lat'])
x = data[:, 0]
y = data[:, 1]
z = rho_n_sc.flatten()
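    # Build a colored track line: split the orbit into consecutive point-pair
    # segments so each segment can be colored by its data value z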
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
norm = plt.Normalize(2e-14, 18e-13)
lc = LineCollection(segments, cmap='gist_ncar', norm=norm)
lc.set_array(z)
lc.set_linewidth(6)
line = panel.major_ax.add_collection(lc)
cbar = plt.gcf().colorbar(line, ax=panel.major_ax, pad=0.1, fraction=0.03)
cbar.set_label('Neutral mass density\n' + r'(kg/m$^{3}$)', rotation=270, labelpad=25)
#cbaxes = plt.gcf().add_axes([0.8, 0.1, 0.03, 0.8])
#cb = plt.colorbar(panel.major_ax, cax = cbaxes)
sectime, dt0 = du.convert_datetime_to_sectime(sc_dt, datetime.datetime(dt_fr.year, dt_fr.month, dt_fr.day))
sectime_res = 10 * 60
time_ticks = np.arange(np.floor(sectime[0]/sectime_res)*sectime_res, np.ceil(sectime[-1]/sectime_res)*sectime_res, sectime_res)
from scipy.interpolate import interp1d
f = interp1d(sectime, x, fill_value='extrapolate')
x_time_ticks = f(time_ticks)
f = interp1d(sectime, y, fill_value='extrapolate')
y_time_ticks = f(time_ticks)
panel.major_ax.plot(x_time_ticks, y_time_ticks, '.', markersize=4, color='k')
for ind, time_tick in enumerate(time_ticks):
time = dt0 + datetime.timedelta(seconds=time_tick)
x_time_tick = x_time_ticks[ind]
y_time_tick = y_time_ticks[ind]
if x_time_tick < panel._extent[0] or x_time_tick > panel._extent[1]:
continue
if y_time_tick < panel._extent[2] or y_time_tick > panel._extent[3]:
continue
panel.major_ax.text( x_time_tick, y_time_tick, time.strftime("%d %H:%M"), fontsize=6)
panel.add_coastlines()
panel.add_grids()
plt.gcf().suptitle('Swarm-C neutral mass density\n 2016-02-03T07:00 - 2016-02-03T07:50')
plt.savefig('test_pho_n_aacgm.png', dpi=300)
# plt.show()
def show_n_e(dt_fr, dt_to):
import cdflib
filepath = "~/tmp/SW_OPER_EFIC_LP_1B_20160203T000000_20160203T235959_0501_MDR_EFI_LP.cdf"
cf = cdflib.CDF(filepath)
cf_info = cf.cdf_info()
n_e = cf.varget(variable='Ne')
T_e = cf.varget(variable='Te')
sc_lat = cf.varget(variable='Latitude')
sc_lon = cf.varget(variable='Longitude')
timestamp = cf.varget(variable='Timestamp')
dtstrs = cdflib.cdfepoch.encode(timestamp)
dts = np.empty_like(timestamp, dtype=datetime.datetime)
for ind, dtstr in enumerate(dtstrs):
dts[ind] = datetime.datetime.strptime(dtstr+'000', '%Y-%m-%dT%H:%M:%S.%f')
ind_dt = np.where((dts >= dt_fr) & (dts <= dt_to))[0]
# times = cdflib.cdfepoch.unixtime(timestamp, to_np=True)
sc_lon = sc_lon[ind_dt]
sc_lat = sc_lat[ind_dt]
sc_dt = dts[ind_dt]
rho_n_sc = n_e[ind_dt]
plt.figure(figsize=(8,8))
cs = 'GEO'
panel = PolarMap(
cs='GEO', style='lst-fixed', pole='N', lon_c=None, lst_c=0, mlt_c=None, ut=dt_fr,
boundary_lat=0., proj_type='Stereographic')
# cs = 'AACGM'
#panel = PolarMap(cs=cs, style='mlt-fixed', pole='N', lon_c=None, lst_c=None, mlt_c=0, ut=dt_fr, boundary_lat=30.,
# proj_type='Stereographic')
panel.set_extent(boundary_style='circle')
data = panel.projection.transform_points(ccrs.PlateCarree(), sc_lon, sc_lat)
x = data[:, 0]
y = data[:, 1]
from scipy.stats import linregress
coef = linregress(x, y)
a = coef.slope
b = coef.intercept
# x1 = np.linspace(np.nanmin(x), np.nanmax(x), num=500)
# y1 = np.linspace(np.nanmin(y), np.nanmax(y), num=500)
# x2d, y2d = np.meshgrid(x, y)
# z2d = griddata(data[:, 0:2], rho_n_sc.flatten(), (x2d, y2d), method='nearest')
# z2d_dist = np.abs(a*x2d - y2d + b) / np.sqrt(a**2 + 1)
# z2d = np.where(z2d_dist>1000, np.nan, z2d)
#
# im = panel.major_ax.pcolormesh(x2d, y2d, z2d, vmin=2e-14, vmax=20e-13, cmap='gist_ncar')
# panel.major_ax.plot(sc_lon, sc_lat, transform=ccrs.Geodetic(), linewidth=0.5, color='k')
# plt.colorbar(im)
# xx = np.tile(x, [3, 1])
#
# yy = np.concatenate((y[np.newaxis, :]-150000, y[np.newaxis, :], y[np.newaxis, :]+150000), axis=0)
# zz = np.concatenate((rho_n_sc.T, rho_n_sc.T, rho_n_sc.T), axis=0)
#
# zz = griddata(data[:, 0:2], rho_n_sc.flatten(), (xx, yy), method='nearest')
# im = panel.major_ax.pcolormesh(xx, yy, zz, vmin=2e-14, vmax=20e-13, cmap='gist_ncar')
#
# # panel.major_ax.plot(sc_lon, sc_lat, transform=ccrs.Geodetic(), linewidth=0.5, color='k')
# plt.colorbar(im)
#panel.major_ax.plot(data[:,0], data[:,1], linewidth=3)
from matplotlib.collections import LineCollection
coords = {'lat': sc_lat, 'lon': sc_lon, 'height': 250.}
cs_new = panel.cs_transform(cs_fr='GEO', cs_to=cs, coords=coords)
data = panel.projection.transform_points(ccrs.PlateCarree(), cs_new['lon'], cs_new['lat'])
x = data[:, 0]
y = data[:, 1]
z = rho_n_sc.flatten()
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
norm = colors.LogNorm(vmin=8e3, vmax=1e6)
lc = LineCollection(segments, cmap=cm.cmap_gist_ncar_modified(), norm=norm)
lc.set_array(z)
lc.set_linewidth(6)
line = panel.major_ax.add_collection(lc)
cbar = plt.gcf().colorbar(line, ax=panel.major_ax, pad=0.1, fraction=0.03)
cbar.set_label(r'$n_e$' + '\n' + r'(cm$^{-3}$)', rotation=270, labelpad=25)
#cbaxes = plt.gcf().add_axes([0.8, 0.1, 0.03, 0.8])
#cb = plt.colorbar(panel.major_ax, cax = cbaxes)
sectime, dt0 = du.convert_datetime_to_sectime(sc_dt, datetime.datetime(dt_fr.year, dt_fr.month, dt_fr.day))
sectime_res = 10 * 60
time_ticks = np.arange(np.floor(sectime[0]/sectime_res)*sectime_res, np.ceil(sectime[-1]/sectime_res)*sectime_res, sectime_res)
from scipy.interpolate import interp1d
f = interp1d(sectime, x, fill_value='extrapolate')
x_time_ticks = f(time_ticks)
f = interp1d(sectime, y, fill_value='extrapolate')
y_time_ticks = f(time_ticks)
panel.major_ax.plot(x_time_ticks, y_time_ticks, '.', markersize=4, color='k')
for ind, time_tick in enumerate(time_ticks):
time = dt0 + datetime.timedelta(seconds=time_tick)
x_time_tick = x_time_ticks[ind]
y_time_tick = y_time_ticks[ind]
if x_time_tick < panel._extent[0] or x_time_tick > panel._extent[1]:
continue
if y_time_tick < panel._extent[2] or y_time_tick > panel._extent[3]:
continue
panel.major_ax.text( x_time_tick, y_time_tick, time.strftime("%d %H:%M"), fontsize=6)
panel.add_coastlines()
panel.add_gridlines()
plt.gcf().suptitle('Swarm-C electron density\n' + dt_fr.strftime("%Y%m%dT%H%M") + ' - ' + dt_to.strftime("%Y%m%dT%H%M"))
plt.savefig('swarm_ne_' + cs + '_' + dt_fr.strftime("%Y%m%d_%H%M") + '-' + dt_to.strftime('%H%M'), dpi=300)
plt.show()
def show_T_e(dt_fr, dt_to):
import cdflib
filepath = "~/tmp/SW_OPER_EFIC_LP_1B_20160203T000000_20160203T235959_0501_MDR_EFI_LP.cdf"
cf = cdflib.CDF(filepath)
cf_info = cf.cdf_info()
n_e = cf.varget(variable='Ne')
T_e = cf.varget(variable='Te')
sc_lat = cf.varget(variable='Latitude')
sc_lon = cf.varget(variable='Longitude')
timestamp = cf.varget(variable='Timestamp')
dtstrs = cdflib.cdfepoch.encode(timestamp)
dts = np.empty_like(timestamp, dtype=datetime.datetime)
for ind, dtstr in enumerate(dtstrs):
dts[ind] = datetime.datetime.strptime(dtstr+'000', '%Y-%m-%dT%H:%M:%S.%f')
ind_dt = np.where((dts >= dt_fr) & (dts <= dt_to))[0]
# times = cdflib.cdfepoch.unixtime(timestamp, to_np=True)
sc_lon = sc_lon[ind_dt]
sc_lat = sc_lat[ind_dt]
sc_dt = dts[ind_dt]
rho_n_sc = T_e[ind_dt]
plt.figure(figsize=(8,8))
panel = PolarView(cs='GEO', pole='N', lon_c=None, lst_c=0, ut=dt_fr, boundary_lat=0., proj_style='Stereographic')
panel.add_subplot(major=True)
panel.set_extent(boundary_style='circle')
data = panel.projection.transform_points(ccrs.PlateCarree(), sc_lon, sc_lat)
x = data[:, 0]
y = data[:, 1]
from scipy.stats import linregress
coef = linregress(x, y)
a = coef.slope
b = coef.intercept
# x1 = np.linspace(np.nanmin(x), np.nanmax(x), num=500)
# y1 = np.linspace(np.nanmin(y), np.nanmax(y), num=500)
# x2d, y2d = np.meshgrid(x, y)
# z2d = griddata(data[:, 0:2], rho_n_sc.flatten(), (x2d, y2d), method='nearest')
# z2d_dist = np.abs(a*x2d - y2d + b) / np.sqrt(a**2 + 1)
# z2d = np.where(z2d_dist>1000, np.nan, z2d)
#
# im = panel.major_ax.pcolormesh(x2d, y2d, z2d, vmin=2e-14, vmax=20e-13, cmap='gist_ncar')
# panel.major_ax.plot(sc_lon, sc_lat, transform=ccrs.Geodetic(), linewidth=0.5, color='k')
# plt.colorbar(im)
# xx = np.tile(x, [3, 1])
#
# yy = np.concatenate((y[np.newaxis, :]-150000, y[np.newaxis, :], y[np.newaxis, :]+150000), axis=0)
# zz = np.concatenate((rho_n_sc.T, rho_n_sc.T, rho_n_sc.T), axis=0)
#
# zz = griddata(data[:, 0:2], rho_n_sc.flatten(), (xx, yy), method='nearest')
# im = panel.major_ax.pcolormesh(xx, yy, zz, vmin=2e-14, vmax=20e-13, cmap='gist_ncar')
#
# # panel.major_ax.plot(sc_lon, sc_lat, transform=ccrs.Geodetic(), linewidth=0.5, color='k')
# plt.colorbar(im)
#panel.major_ax.plot(data[:,0], data[:,1], linewidth=3)
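    # Same segment-coloring approach as in show_n_e, but with a linear
    # normalization suited to electron temperature values.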
from matplotlib.collections import LineCollection
data = panel.projection.transform_points(ccrs.PlateCarree(), sc_lon, sc_lat)
x = data[:, 0]
y = data[:, 1]
z = rho_n_sc.flatten()
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
norm = plt.Normalize(500, 4000)
# norm = colors.LogNorm(vmin=5e2, vmax=1e6)
lc = LineCollection(segments, cmap='gist_ncar', norm=norm)
lc.set_array(z)
lc.set_linewidth(6)
line = panel.major_ax.add_collection(lc)
cbar = plt.gcf().colorbar(line, ax=panel.major_ax, pad=0.1, fraction=0.03)
cbar.set_label(r'$T_e$' + '\n' + r'(K)', rotation=270, labelpad=25)
#cbaxes = plt.gcf().add_axes([0.8, 0.1, 0.03, 0.8])
#cb = plt.colorbar(panel.major_ax, cax = cbaxes)
sectime, dt0 = du.convert_datetime_to_sectime(sc_dt, datetime.datetime(dt_fr.year, dt_fr.month, dt_fr.day))
sectime_res = 10 * 60
time_ticks = np.arange(np.floor(sectime[0]/sectime_res)*sectime_res, np.ceil(sectime[-1]/sectime_res)*sectime_res, sectime_res)
from scipy.interpolate import interp1d
f = interp1d(sectime, x, fill_value='extrapolate')
x_time_ticks = f(time_ticks)
f = interp1d(sectime, y, fill_value='extrapolate')
y_time_ticks = f(time_ticks)
panel.major_ax.plot(x_time_ticks, y_time_ticks, '.', markersize=4, color='k')
for ind, time_tick in enumerate(time_ticks):
time = dt0 + datetime.timedelta(seconds=time_tick)
x_time_tick = x_time_ticks[ind]
y_time_tick = y_time_ticks[ind]
if x_time_tick < panel._extent[0] or x_time_tick > panel._extent[1]:
continue
if y_time_tick < panel._extent[2] or y_time_tick > panel._extent[3]:
continue
        panel.major_ax.text(x_time_tick, y_time_tick, time.strftime("%d %H:%M"), fontsize=6)
panel.add_coastlines()
    panel.add_gridlines()
plt.gcf().suptitle('Swarm-C electron temperature\n 2016-02-03T07:00 - 2016-02-03T07:50')
plt.savefig('test_T_e.png', dpi=300)
plt.show()
if __name__ == "__main__":
dt_fr = datetime.datetime(2016, 2, 3, 0, 40)
dt_to = datetime.datetime(2016, 2, 3, 1, 40)
# show_rho_n(dt_fr, dt_to)
show_n_e(dt_fr, dt_to)
# show_T_e(dt_fr, dt_to)
``` |
{
"source": "jouleffect/babyeye",
"score": 3
} |
#### File: babyeye/predictor/show.py
```python
import matplotlib.pyplot as plt
from keras.models import Model
#------- Example images ---------------#
def show_data_test(test,model):
plt.figure(figsize=(15,6))
for i in range(40):
plt.subplot(4, 10, i+1)
        # Show the i-th test image with its predicted class. The original indexed
        # test[0] and model[i], which would fail on a Keras Model; a softmax
        # classifier over digit classes is assumed here.
        plt.imshow(test[i].reshape((28,28)), cmap=plt.cm.binary)
        plt.title("Predict: %d" % model.predict(test[i:i+1]).argmax())
plt.axis('off')
plt.subplots_adjust(wspace=-0.1, hspace=-0.1)
plt.show()
```
#### File: babyeye/trainer/show.py
```python
import matplotlib.pyplot as plt
#------- Example images ---------------#
def show_data(X_train,Y_train,datagen):
plt.figure(figsize=(15,6))
for i in range(40):
plt.subplot(4, 10, i+1)
X_train2, Y_train2 = datagen.flow(X_train,Y_train).next()
plt.imshow(X_train2[0].reshape((28,28)),cmap=plt.cm.binary)
plt.axis('off')
plt.subplots_adjust(wspace=-0.1, hspace=-0.1)
plt.show()
``` |
{
"source": "jouleffect/SEIRD-Epidemics-Simulator",
"score": 3
} |
#### File: jouleffect/SEIRD-Epidemics-Simulator/simulator.py
```python
import numpy as np
import model
import random
from igraph import *
class Simulator():
"""docstring for Simulator"""
def __init__(self, num_nodi=10000, p_link=0.5, leng=10000, exp0=20, t_exp=3, t_inf=10, alfa=0.5, beta=0.7, gamma=0.1):
        self.num_nodi = num_nodi # Number of nodes in the network
        self.p_link = p_link # Link (connection) probability
        self.exp0 = exp0 # Initial number of individuals exposed to the disease
        self.leng = leng # Length of the simulation
        self.dt_state = 1 # Time increment of the states (1 day)
        self.num_iter = int(self.leng/self.dt_state) # Number of iterations
        self.t_exp = t_exp # Time to pass from Exposed to Infected (incubation time of the virus)
        self.t_inf = t_inf # Duration of the infection until recovery
        self.alfa = alfa # Rate of exposure to the virus
        self.beta = beta # Rate of severe infection
        self.gamma = gamma # Mortality rate in the severe infection state
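    # Epidemic state encoding used by the transition methods below (susceptible
    # is assumed to be the default/remaining state):
    # 1 = Exposed, 2 = Infected, 3 = Severely infected, 4 = Recovered, 5 = Dead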
    def s_to_e(self, network, contacts): # Transition from Susceptible to Exposed
        # Set the epidemic states to 1 and the corresponding time states to 0
network.e_state[contacts] = 1
network.t_state[contacts] = 0
    def e_to_i(self, network): # Transition from Exposed to Infected
        # Compute the infected by taking the exposed who have exceeded the exposure time,
        # setting the epidemic states to 2 and the corresponding time states to 0
inf = np.where(((network.e_state==1) & (network.t_state >= self.t_exp)),1,0)
network.e_state = np.where(inf==1,2,network.e_state)
network.t_state = np.where(inf==1,0,network.t_state)
    def i_to_r(self, network): # Transition from Infected to Recovered
        # Compute the recovered by taking the infected who have exceeded the infection time,
        # setting the epidemic states to 4
infected = np.where(((network.e_state==2)&(network.t_state>=self.t_inf)),1,0)
network.e_state = np.where(infected == 1, 4, network.e_state)
    def i_to_ig(self, network, i): # Transition from Infected to Severely Infected
        # Compute the severely infected, taking the infected who satisfy the probability beta
inf = np.where((network.e_state==2))
ind_inf = inf[0]
Rand = np.random.random((len(ind_inf), 1))
        # Set the epidemic states to 3
network.e_state[ind_inf[np.where(Rand<(self.beta*self.dt_state))[0]]] = 3
    def ig_to_d(self, network, i): # Transition from Severely Infected to Dead
        # Compute the dead by taking the infected states that, past the infection time, satisfy the probability gamma
        Rand = np.random.random((self.num_nodi,1)) # random number generation
        # Recovery cases
end_inf = np.where((network.t_state >= self.t_inf) & (network.e_state == 3) & (Rand>self.gamma))
network.e_state[end_inf] = 4
        # Death cases
dead_inf = np.where((network.t_state >= self.t_inf) & (network.e_state == 3) & (Rand<self.gamma))
network.e_state[dead_inf] = 5
``` |
{
"source": "joulephicar/Arsip-WU",
"score": 2
} |
#### File: Binary Exploitation/binary-exploitation-is-ez/solver.py
```python
from pwn import *
# connection
#r = process('./ez')
r = remote('192.168.3.11',23170)
# target address
ez_win = 0x4014a0
def newMeme(size, content):
r.sendlineafter("Choice: ", "1")
r.sendlineafter("size: ", str(size))
r.sendlineafter("content: ", content)
def editMeme(idx, content):
r.sendlineafter("Choice: ", "2")
r.sendlineafter("Index: ", str(idx))
r.sendlineafter("content: ", content)
def printMeme(idx):
r.sendlineafter("Choice: ", "3")
r.sendlineafter("Index: ", str(idx))
r.interactive()
# create new memes
newMeme(1, "TEST1")
newMeme(2, "TEST2")
newMeme(3, "TEST3")
newMeme(4, "TEST4")
newMeme(5, "TEST5")
newMeme(6, "TEST6")
newMeme(7, "TEST7")
newMeme(8, "TEST8")
# overwrite the content with the target address
editMeme(0, p64(ez_win)*5)
# print the flag
printMeme(1)
``` |
{
"source": "joules457/cfdi-iva-snippet",
"score": 3
} |
#### File: joules457/cfdi-iva-snippet/cfdi_directory.py
```python
import os
import sys
import argparse
from tabulate import tabulate
from cfdi.utils import export as export_utils
from cfdi.utils import cfdi_amounts as cfdia
def get_directory_path():
"""Get directory path.
Parameters
----------
Returns
-------
str
Directory path.
"""
return os.path.dirname(
os.path.realpath(__file__)
)
EXPORT_FILENAME = os.path.join(
get_directory_path(), 'cfdi_directory.csv'
)
parser = argparse.ArgumentParser(
description="""
This script determines the IVA total amount for given directory
"""
)
parser.add_argument(
'-dir',
'--directory',
type=str,
help='CFDI\'s directory: /home/2020/january/received/cfdi',
required=True
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Print Verbose Info',
required=False
)
parser.add_argument(
'-e',
'--export',
action='store_true',
help='Export scan result to cfdi_directory.csv file',
required=False
)
# Shell args!!!
script_args = vars(parser.parse_args())
def main(my_args):
my_status = True
# data_table = []
total_table = []
try:
print('CFDI Directory: ', my_args['directory'])
cfdi_amounts = cfdia.get_directory_cfdi_amounts(
my_args['directory']
)
if cfdi_amounts['status'] == 1:
raise cfdi_amounts['error']
if my_args['export']:
export_utils.csv_file(
cfdi_amounts['info'],
EXPORT_FILENAME
)
if my_args['verbose']:
print(
tabulate(cfdi_amounts['info'], headers='keys')
)
total_table.append({
'total': 'Subtotal',
'amount': cfdi_amounts['subtotal_cfdi_amount']
})
total_table.append({
'total': 'Discount',
'amount': cfdi_amounts['discount_cfdi_amount']
})
total_table.append({
'total': 'IVA',
'amount': cfdi_amounts['iva_cfdi_amount']
})
total_table.append({
'total': 'Total',
'amount': cfdi_amounts['total_cfdi_amount']
})
print(
tabulate(total_table)
)
except Exception as err:
print('ERR: ', err)
my_status = False
return my_status
if __name__ == "__main__":
my_status = 0 if main(script_args) is True else 1
sys.exit(my_status)
```
#### File: cfdi/utils/directory.py
```python
import re
import os
from cfdi.utils import constants as ccfdi
def get_directory_files(
path: str,
include_pattern: re.Pattern = None,
exclude_pattern: re.Pattern = None
):
"""List files from given path directory,
take an include and exclude patterns.
Parameters
----------
    path : str
        Directory path to scan.
    include_pattern : re.Pattern
        re.compile pattern for files to include.
    exclude_pattern : re.Pattern
        re.compile pattern for files to exclude.
Returns
-------
dict
List of files that match given pattern.
{status, files}
"""
result = {
'status': 0,
'files': [],
}
try:
my_include_pattern = include_pattern \
if include_pattern \
else ccfdi.ALL_FILE_PATTERN
my_exclude_pattern = exclude_pattern \
if exclude_pattern \
else ccfdi.EMPTY_STRING_PATTERN
files = [
f for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))
]
for file in files:
if my_include_pattern.match(file) and \
not my_exclude_pattern.match(file):
result['files'].append(file)
except Exception as err:
result['status'] = 1
result['files'] = None
result['error'] = err
return result
```
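
A minimal usage sketch (hypothetical path and patterns, not from the repository):
```python
import re
from cfdi.utils import directory as dir_utils

# List only .xml files, skipping names that start with "tmp"
result = dir_utils.get_directory_files(
    "/home/2020/january/received/cfdi",  # example path from the script's --directory help text
    include_pattern=re.compile(r".*\.xml$"),
    exclude_pattern=re.compile(r"^tmp"),
)
if result['status'] == 0:
    print(result['files'])
```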
#### File: cfdi/utils/export.py
```python
import os
import csv
def csv_file(dict_list: list, filename: str):
"""write a CSV File with given filename and data.
Parameters
----------
dict_list : list
List of dicts.
filename : str
FIlename.
Returns
-------
dict
Result {status, csv_filename}.
"""
result = {
'status': 0,
'csv_filename': None
}
try:
fieldnames = dict_list[0].keys() if dict_list else []
with open(filename, 'w', newline='') as csvfile:
writer = csv.DictWriter(
csvfile,
fieldnames=fieldnames,
quoting=csv.QUOTE_MINIMAL
)
writer.writeheader()
for row in dict_list:
writer.writerow(row)
result['csv_filename'] = filename
except Exception as err:
result['status'] = 1
result['error'] = err
return result
```
#### File: cfdi-iva-snippet/tests/test_utils_directory.py
```python
import os
import json
import pytest
from tests.resources import scenarios
from cfdi.utils import directory as dir_utils
@pytest.fixture(scope='session')
def dir_path():
return os.path.dirname(
os.path.realpath(__file__)
)
def test_get_directory_files(dir_path):
for scenario in scenarios.DIRECTORY_FILES:
abs_path = os.path.join(dir_path, scenario['payload']['path'])
result = dir_utils.get_directory_files(
abs_path,
scenario['payload']['include_pattern']
)
if scenario['error']:
assert result['status'] == 1
assert result['files'] is None
assert isinstance(result['error'], Exception)
else:
assert result['status'] == 0
assert isinstance(result['files'], list)
assert json.dumps(result['files'].sort()) == \
json.dumps(scenario['result'].sort())
# print(result)
``` |
{
"source": "JoulesCH/Automated-Trading-System",
"score": 2
} |
#### File: Automated-Trading-System/WebApp/callbacks.py
```python
import base64
import io
import statistics
from datetime import date
import os
import json
from random import choice
import string
import time
import subprocess
# Local packages
from views import appD
# Installed packages
import humanize
from dash.dependencies import Input, Output, State
from dash import html
import plotly.graph_objects as go
import pandas as pd
from dash import dcc
import dash_bootstrap_components as dbc
from dash.exceptions import PreventUpdate
from plotly.subplots import make_subplots
import yfinance as yf
@appD.callback(
Output("navbar-collapse", "is_open"),
[Input("navbar-toggler", "n_clicks")],
[State("navbar-collapse", "is_open")],
)
def toggle_navbar_collapse(n, is_open):
if n:
return not is_open
return is_open
@appD.callback(Output('output-data-upload', 'children'),
Output('uploadFile', 'children'),
Output('memory', 'data'),
Input('upload-data', 'contents'),
Input('goDownload', 'n_clicks'),
State('upload-data', 'filename'),
State('upload-data', 'last_modified'),
State('symbol', 'value'),
State('frequency', 'value'),
State('date-picker', 'start_date'),
State('date-picker', 'end_date'))
def update_output(content, n, filename, last_modified, symbol, frequency, start, end):
print("***************************", start, end)
if not filename:
if not n:
raise PreventUpdate
        elif not symbol or not frequency or not start or not end:
return dbc.Alert("Llena todos los datos", color="danger"), None, None
else:
listDecoded = yf.Ticker(symbol).history(period=frequency.lower(), start=start, end = end).Close.to_list()
elif filename[-4:] not in ['.txt', '.csv']:
return dbc.Alert("Tipo de archivo no permitido", color="danger"), None, None
else:
content_type, content_string = content.split(',')
decoded = base64.b64decode(content_string).decode('utf-8').replace('\r','')
try:
listDecoded = [ float(value) for value in decoded.split('\n')]
        except ValueError:
            return dbc.Alert("Hay un dato no válido", color="danger"), None, None
fig = go.Figure(data = [go.Scatter(x=list(range(len(listDecoded))), y=listDecoded, mode = 'lines+markers')])
fig.update_xaxes(rangeslider_visible=True)
fig.layout.height = 700
fig.update_layout(template='plotly_dark', paper_bgcolor="#001F40", plot_bgcolor = "#000F20")
data = {
'filename':filename,
'last_modified':last_modified,
'data':listDecoded,
'symbol': symbol if symbol else filename[:filename.find('.')]
}
return html.Div([
html.H2(symbol if symbol else filename[:filename.find('.')], style = {'margin-bottom':'40px'}),
html.Div([
dcc.Graph(figure = fig, animate = True, id = 'tradeGraph'),
], id = 'grafico1'),
html.Div([
html.Div([
dbc.Card(
[
dbc.CardHeader(
html.H2(
dbc.Button(
f"¿No estás seguro de los parámetros? Da clic y abre el glosario",
color="link",
id=f"group-1-toggle",
n_clicks=0,
)
)
),
dbc.Collapse(
dbc.CardBody([
html.Ul([
html.Li([html.B("Precisión"), ": Representa el margen de error con el que se decide si un punto es de soporte o de resistencia"]),
html.Li([html.B("Máximo de inversión"), ": Indica el monto máximo que se puede invertir, es relativo al capital disponible"]),
html.Li([html.B("Stop Loss"), ": La posición se cerrará cuando se haya perdido esta cantidad"]),
html.Li([html.B("Take Profit"), ": Cuando la ganancia sea igual al precio de apertura multiplicado por este parámetro, entonces se cerrará la posición"]),
])
]),
id=f"collapse-1",
is_open=False,
),
]
),
], className="accordion", style = {'text-align':'left','color':'black','width':'70%','margin-left':'auto', 'margin-right':'auto', 'margin-bottom':'30px'}),
dbc.FormGroup(
[
dbc.Row([
dbc.Col([
dbc.Label("Capital inicial:"),
dbc.Input(placeholder="Valor entero mayor a 0", type="number", min = 0, value = 2000.00, id = 'Capital'),
]),
dbc.Col([
dbc.Label("Precisión:"),
dbc.Input(placeholder="ERROR", type="number", min = 0, value = 0.006, id = 'ERROR'),
]),
dbc.Col([
dbc.Label("Máximo de inversión relativo:"),
dbc.Input(placeholder="MAX_INV", type="number", min = 0, value = 0.8, id = 'MAX_INV'),
]),
]),
dbc.Row([
dbc.Col([
dbc.Label("Stop Loss absoluto:"),
dbc.Input(placeholder="STOP_LOSS", type="number", min = 0, value = 10000, id = 'STOP_LOSS'),
]),
dbc.Col([
dbc.Label("Take Profit relativo:"),
dbc.Input(placeholder="TAKE_PROFIT", type="number", min = 1, value = 1.02, id = 'TAKE_PROFIT'),
]),
])
], style = {'width':'70%','margin-left':'auto', 'margin-right':'auto'}, id = 'formCapital'
),
dbc.Button('¡Empezar a tradear!', color="primary", className="mr-1", id = 'beginButton'),
],
style = {'text-align':'center'}
)
]), html.Div([
dbc.Alert([
html.H4("Datos cargados con éxito"),
html.Hr(),
"Revisa la gráfica y desliza hasta abajo para ingresar los datos faltantes",
], color="primary",duration=5000,
)
]), data
i = 1
@appD.callback(
Output(f"collapse-{i}", "is_open") ,
Input(f"group-{i}-toggle", "n_clicks"),
State(f"collapse-{i}", "is_open"),
)
def toggle_accordion(n1, is_open1):
if n1:
return not is_open1
return is_open1
def conect(data, capital, sma1, sma2, ERROR, MAX_INV, STOP_LOSS, TAKE_PROFIT):
filename = ''.join(choice(string.ascii_lowercase + string.ascii_uppercase) for _ in range(10)) + '.txt'
# f = open(filename, "w")
# f.write(data.replace('*',' '))
# f.close()
    # Run the program
# os.system(f"./main {filename} {capital}")
# time.sleep(1)
parametros = [str(ERROR), str(MAX_INV), str(STOP_LOSS), str(TAKE_PROFIT)]
stdOutput = os.popen(f'./main {filename} {capital} {" ".join(parametros)} {data.replace("*", " ")}').read()
#print(stdOutput, stdOutput[stdOutput.find('{'):])
stdOutput = stdOutput[stdOutput.find('{'):]
print('\n\n\n','*'*25, stdOutput, '*'*25,'\n\n\n' )
if not stdOutput:
return None
data = json.loads(stdOutput)
    # Collect the data
# with open(f"{filename}.json") as json_file:
# data = json.load(json_file)
# os.system(f"rm {filename}")
# os.system(f"rm {filename}.json")
return data
@appD.callback(Output('grafico1', 'children'),
Output('beginButton', 'children'),
Output('formCapital', 'children'),
Output('tittle', 'children'),
Output('Results', 'children'),
Input("beginButton","n_clicks"),
State('memory', 'data'),
State('Capital','value'),
State('ERROR','value'),
State('MAX_INV','value'),
State('STOP_LOSS','value'),
State('TAKE_PROFIT','value'),
)
def beginTrading(n, localMemory, Capital, ERROR, MAX_INV, STOP_LOSS, TAKE_PROFIT):
if not n:
raise PreventUpdate
else:
sma1 = 3
sma2 = 10
capital_ini = Capital
#response = requests.post('http://127.0.0.1:5050/', {'data':'*'.join(str(x) for x in localMemory['data']), 'capital':capital_ini, 'sma1': sma1, 'sma2':sma2}).json()
response = conect(ERROR = ERROR, MAX_INV = MAX_INV, STOP_LOSS = STOP_LOSS, TAKE_PROFIT = TAKE_PROFIT,
**{ 'data':'*'.join(str(x) for x in localMemory['data']),
'capital':capital_ini,
'sma1': sma1,
'sma2':sma2,
})
if not response:
return [None,
'Actualizar capital',
[dbc.FormGroup(
[
dbc.Row([
dbc.Col([
dbc.Label("Capital inicial:"),
dbc.Input(placeholder="Valor entero mayor a 0", type="number", min = 0, value = Capital, id = 'Capital'),
]),
dbc.Col([
dbc.Label("Precisión:"),
dbc.Input(placeholder="ERROR", type="number", min = 0, value = ERROR, id = 'ERROR'),
]),
dbc.Col([
dbc.Label("Máximo de inversión relativo:"),
dbc.Input(placeholder="MAX_INV", type="number", min = 0, value = MAX_INV, id = 'MAX_INV'),
]),
]),
dbc.Row([
dbc.Col([
dbc.Label("Stop Loss absoluto:"),
dbc.Input(placeholder="STOP_LOSS", type="number", min = 0, value = STOP_LOSS, id = 'STOP_LOSS'),
]),
dbc.Col([
dbc.Label("Take Profit relativo:"),
dbc.Input(placeholder="TAKE_PROFIT", type="number", min = 1, value = TAKE_PROFIT, id = 'TAKE_PROFIT'),
]),
])
], style = {'width':'70%','margin-left':'auto', 'margin-right':'auto'}, id = 'formCapital'
), ],
dbc.Alert("Capital insuficiente. Utiliza un capital que se iguale al precio ", color="danger"), None]
listDecoded = localMemory['data']
fig = go.Figure()
fig.layout.height = 700
fig.update_xaxes(rangeslider_visible=True)
df_stock = pd.DataFrame({'close':listDecoded})
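    # Bollinger bands: 20-sample rolling mean +/- 2 rolling standard deviations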
df_stock['SMA'] = df_stock.close.rolling(window = 20).mean()
df_stock['stddev'] = df_stock.close.rolling(window = 20).std()
df_stock['upper'] = df_stock.SMA + 2*df_stock.stddev
df_stock['lower'] = df_stock.SMA - 2*df_stock.stddev
#print(df_stock.iloc[19:30,:])
fig.add_trace(go.Scatter(x = list(range(len(listDecoded))), y = df_stock.lower,mode = 'lines', name = f'Lower',
line = {'color': '#ff0000'}, fill = None ))
fig.add_trace(go.Scatter(x = list(range(len(listDecoded))), y = df_stock.upper,mode = 'lines', name = f'Upper',
line = {'color': '#ffffff'} , fill='tonexty', fillcolor = 'rgba(255, 255, 255, 0.1)' ))
fig.add_trace(go.Scatter(name='Datos Originales',x=list(range(len(listDecoded))), y=listDecoded, mode = 'lines+markers',
line = {'color':'#636EFA'}))
capital_final = response['capitalFinal']
gain = response['gain']
loss = response['loss']
data = response['data']
balance = response['balance']
if not data:
return [None,
'Actualizar capital',
[dbc.FormGroup(
[
dbc.Row([
dbc.Col([
dbc.Label("Capital inicial:"),
dbc.Input(placeholder="Valor entero mayor a 0", type="number", min = 0, value = Capital, id = 'Capital'),
]),
dbc.Col([
dbc.Label("Precisión:"),
dbc.Input(placeholder="ERROR", type="number", min = 0, value = ERROR, id = 'ERROR'),
]),
dbc.Col([
dbc.Label("Máximo de inversión relativo:"),
dbc.Input(placeholder="MAX_INV", type="number", min = 0, value = MAX_INV, id = 'MAX_INV'),
]),
]),
dbc.Row([
dbc.Col([
dbc.Label("Stop Loss absoluto:"),
dbc.Input(placeholder="STOP_LOSS", type="number", min = 0, value = STOP_LOSS, id = 'STOP_LOSS'),
]),
dbc.Col([
dbc.Label("Take Profit relativo:"),
dbc.Input(placeholder="TAKE_PROFIT", type="number", min = 1, value = TAKE_PROFIT, id = 'TAKE_PROFIT'),
]),
])
], style = {'width':'70%','margin-left':'auto', 'margin-right':'auto'}, id = 'formCapital'
), ],
dbc.Alert("Capital insuficiente. Utiliza un capital que se iguale al precio ", color="danger"), None]
measures = ["absolute"]; x = ["Capital Inicial"]; y=[capital_ini-capital_ini]; text = ["Capital Inicial"]
suma_balances_cerrados = capital_ini
for move in data:
fig.add_trace(go.Scatter(x = [move['OpenIdx'], move['CloseIdx']], y= [move['OpenValue'], move['CloseValue']],
name = move['Movimiento'],
mode = 'lines+markers',
marker_symbol = ['triangle-up', 'triangle-down'],
marker_color = ['green', 'red'],
marker_size=15,
#line =
),
)
if move['Balance']:
measures.append('relative')
x.append(move['Movimiento'])
y.append(move['Balance'])
suma_balances_cerrados+=move['Balance']
text.append(f'{move["Balance"]}')
sma1List = []
sma2List = []
for idx in range(len(listDecoded)):
sma1List.append(
statistics.mean(listDecoded[idx:idx+sma1])
)
sma2List.append(
statistics.mean(listDecoded[idx:idx+sma2])
)
fig.add_trace(go.Scatter(x = list(range(sma1, len(listDecoded))), y= sma1List,
name = f'SMA_{sma1}',
mode = 'lines',
line = {'color': '#FF8A8A'}
),
)
fig.add_trace(go.Scatter(x = list(range(sma2, len(listDecoded))), y= sma2List,
name = f'SMA_{sma2}',
mode = 'lines',
line = {'color': '#949494'}
),
)
measures.append('total')
x.append('Capital Final')
y.append(capital_final)
text.append('Capital Final')
df = pd.DataFrame(data)
balances = []
for value in df['Balance']:
if not value: balances.append(html.P("", style = {'color':'red'}))
elif value <= 0: balances.append(html.P(value, style = {'color':'red'}))
else: balances.append(html.P(value, style ={'color':'green'}))
df['Balance'] = balances
fig2 = make_subplots(
rows=2, cols=2,
vertical_spacing=0.1,
specs=[[{'type':'domain'}, {'type':'domain'}],
[{"type": "waterfall", "colspan": 2}, None],
]
)
fig2.add_trace(
go.Waterfall(
name = "20", orientation = "v",
measure = measures,
x = x,
text = text,
y =y,
connector = {"line":{"color":"rgb(63, 63, 63)"}},
base = capital_ini
), row=2, col=1
)
labels = ["Gain", "Loss"]
fig2.add_trace(go.Pie(labels=labels, values=[gain, loss], name="Gain vs Loss", hole=.9,
title = {"text": "<span style='font-size:0.7em;color:gray'>Gain vs Loss</span>"}
),
1, 1)
fig2.add_trace(go.Indicator(
mode = "number+delta",
value = suma_balances_cerrados,
title = {"text": "<span style='font-size:0.7em;color:gray'>Total ganado</span>"},
delta = {'position': "bottom", 'reference': capital_ini, 'relative': False},
),
1, 2)
fig2.update_layout(
showlegend=False,
#title_text="Resumen de movimientos cerrados",
#autosize=True,
#height=500,
)
fig.update_layout(template='plotly_dark', paper_bgcolor="#001F40", plot_bgcolor = "#000F20", margin=dict(l=5, r=5, t=5, b=5),
showlegend=False,)
fig2.update_layout(template='plotly_dark', paper_bgcolor="#001F40", plot_bgcolor = "#000F20", margin=dict(l=5, r=5, t=5, b=5))
return [None,
"Modificar parámetros",
None,
html.Div([
html.H1('Resultados para ' + localMemory['symbol'], style = {'text-align':'center', 'margin-bottom':'30px'}),
html.H2(['Resumen de movimientos ', html.B('cerrados') ]),
dcc.Graph(figure = fig2, config={'autosizable':True}),
html.H2('Movimientos efectuados:'),
dbc.Row([
dbc.Col([
html.Div(
dbc.Table.from_dataframe(df, striped=True,
responsive = True,
bordered=True, hover=True,
size = 'sm',
dark=True,
),
style={'height': '700px', 'overflowY': 'auto', 'margin-top':'50px', 'padding':'0px'},
className = 'tableHG'
),
], width=5, style = {'padding-right':'5px'}),
dbc.Col([
dcc.Graph(figure = fig, animate = True, style = {'padding':'0px'}),
], width=7, style = {'padding':'0px'}),
]),
html.H2('Parámetros ingresados:'),
html.P("Modifica los parámetros y da clic al botón"),
dbc.FormGroup(
[
dbc.Row([
dbc.Col([
dbc.Label("Capital inicial:"),
dbc.Input(placeholder="Valor entero mayor a 0", type="number", min = 0, value = Capital, id = 'Capital'),
]),
dbc.Col([
dbc.Label("Precisión:"),
dbc.Input(placeholder="ERROR", type="number", min = 0, value = ERROR, id = 'ERROR'),
]),
dbc.Col([
dbc.Label("Máximo de inversión relativo:"),
dbc.Input(placeholder="MAX_INV", type="number", min = 0, value = MAX_INV, id = 'MAX_INV'),
]),
]),
dbc.Row([
dbc.Col([
dbc.Label("Stop Loss absoluto:"),
dbc.Input(placeholder="STOP_LOSS", type="number", min = 0, value = STOP_LOSS, id = 'STOP_LOSS'),
]),
dbc.Col([
dbc.Label("Take Profit relativo:"),
dbc.Input(placeholder="TAKE_PROFIT", type="number", min = 1, value = TAKE_PROFIT, id = 'TAKE_PROFIT'),
]),
])
], style = {'width':'70%','margin-left':'auto', 'margin-right':'auto'}, id = 'formCapital'
),
]),
[
dbc.Row([
dbc.Col(f'Capital actual (ℹ)', width=2, id ='p-1-target'),
dbc.Col(f'Balance (ℹ)', width=2, id ='p-2-target'),
#dbc.Col(f'Margen: ${0}', width=2),
#dbc.Col(f'Margen Libre: {0}%', width=2),
dbc.Col(f'Beneficio actual (ℹ)', width=2, id ='p-3-target'),
], justify="center", style = {"width":"100%", 'color': 'rgb(255, 132, 0)'}),
dbc.Row([
dbc.Col(f'${ humanize.intcomma(float(format(capital_final, ".2f")))}', width=2),
dbc.Col(f'${humanize.intcomma(float( format(balance, ".2f") ))}', width=2),
#dbc.Col(f'Margen: ${0}', width=2),
#dbc.Col(f'Margen Libre: {0}%', width=2),
dbc.Col(f'${ humanize.intcomma(float(format(balance-capital_final, ".2f")))}', width=2),
], justify="center", style = {"width":"100%"}),
]+[
dbc.Popover(
[
dbc.PopoverHeader(title),
dbc.PopoverBody(desc),
],
id=f"popover-{placement}",
target=f"p-{placement}-target",
placement="top",
is_open=False,
trigger = 'hover'
) for placement, title, desc in zip([1,2,3],['Info']*3, ["Capital sin tomar en cuenta el cierre de posiciones abiertas",
"Patrimonio contando el valor actual de las posiciones abiertas ",
"Ganacia/perdida de las posiciones abiertas"
]
)
],
]
from resources.dashTemplates import paths
base = '/dash/'
paths = {
base+'mt':paths.marco_teorico,
base+'d': paths.diagrama,
base+'r':paths.repositorio,
base+'p':paths.pruebas,
base+'c':paths.conclusiones
}
@appD.callback(Output('principalLaout', 'children'),
Input('url', 'pathname'))
def updatePrincipalLayout(pathname):
print(pathname)
if pathname == '/dash/' or pathname not in paths:
raise PreventUpdate
return paths[pathname]
``` |
{
"source": "JoulesCH/juegos_del_hambre",
"score": 2
} |
#### File: jdh/resources/__init__.py
```python
from flask import request, redirect
def login_required(function):
def check_login(*args, **kwargs):
if 'token' not in request.cookies:
return redirect(f'/login?next={request.path}')
else:
return function(*args, **kwargs)
# Renaming the function name:
check_login.__name__ = function.__name__
return check_login
def not_login_required(function):
def check_not_login(*args, **kwargs):
if 'loggedIn' in request.cookies or 'token' in request.cookies:
return redirect('/')
else:
return function(*args, **kwargs)
# Renaming the function name:
check_not_login.__name__ = function.__name__
return check_not_login
```
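
A minimal usage sketch for these decorators (hypothetical Flask view; the import path is assumed from the file location):
```python
from flask import Flask
from resources import login_required  # i.e. jdh/resources/__init__.py

app = Flask(__name__)

@app.route('/dashboard')
@login_required
def dashboard():
    # Reached only when a 'token' cookie is present; otherwise the decorator
    # redirects to /login?next=/dashboard
    return 'dashboard'
```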
#### File: jdh/resources/stock_data.py
```python
from flask import request
import yfinance as yf
import redis
import requests
# Local
from utils.symbols import symbols
import models as m
# Built in packages
from datetime import date
import os
import json
from .datenow import day_of_week
r = redis.Redis.from_url(os.getenv('REDIS_URL'))
url = "https://currency-converter5.p.rapidapi.com/currency/convert"
querystring = {"format": "json", "from": "USD", "to": "MXN", "amount": "1"}
headers = {
'x-rapidapi-host': "currency-converter5.p.rapidapi.com",
'x-rapidapi-key': os.getenv('CURRENCY_API_KEY')
}
def currency():
if r.exists('MXN'):
return float(r.get('MXN'))
else:
response = requests.request("GET", url, headers=headers, params=querystring).json()['rates']['MXN']['rate']
r.set('MXN', response)
r.expire('MXN', 60 * 60 * 1)
return float(response)
def get(symbol=None):
if not symbol:
symbol = request.get_json()['symbol']
if r.exists(symbol):
        print(f'Reading data from cache: {r.memory_usage(symbol)}', flush=True)
data = json.loads(r.get(symbol))
else:
        print('Querying fresh data', flush=True)
data = yf.Ticker(symbol).history(period="D1", start=str(date(2021, 5, 1)))
if '.MX' in symbol:
close_values = data.Close.to_list()
else:
            # Convert to Mexican pesos
mxn = currency()
close_values = [d*mxn for d in data.Close.to_list()]
labels = [str(datee).replace(' 00:00:00', '') for datee in data.index]
data = dict(data=close_values, labels=labels, symbol=symbol)
r.set(symbol, json.dumps(data))
r.expire(symbol, 60*60*12)
return data # {'symbol': symbol, 'data':close_values, 'labels':labels}
def restore(key):
    # 1) Reset the cached data
day = day_of_week()
for symbol in symbols:
if r.exists(symbol):
r.delete(symbol)
for cuenta in m.Cuenta.query.all():
# 2) Iterar sobre cuentas
balance = 0
beneficio = 0
for grafico in cuenta.graficos.all():
            # 2.1) Iterate over its charts
valor_actual = get(grafico.simbolo)['data'][-1]
for posicion in grafico.posiciones.all():
                # 2.1.1) Iterate over its positions
if not posicion.cerrado:
                    # 2.1.2) If the position is open:
                    # update the running balance
actual = posicion.volumen*valor_actual
balance += actual
beneficio += posicion.volumen*(valor_actual - posicion.valor_compra ) - posicion.interes_compra - posicion.volumen*posicion.valor_compra*0.0029
        # Update the account balance
cuenta.balance = balance
cuenta.beneficio = beneficio
if day == 1:
cuenta.no_movimientos = 2
m.db.session.commit()
return {'status': 1}
``` |
{
"source": "joulez/Limnoria",
"score": 2
} |
#### File: plugins/Channel/test.py
```python
from supybot.test import *
import supybot.conf as conf
import supybot.ircdb as ircdb
import supybot.ircmsgs as ircmsgs
class ChannelTestCase(ChannelPluginTestCase):
plugins = ('Channel', 'User')
def setUp(self):
super(ChannelTestCase, self).setUp()
self.irc.state.channels[self.channel].addUser('foo')
self.irc.state.channels[self.channel].addUser('bar')
def testLobotomies(self):
self.assertRegexp('lobotomy list', 'not.*any')
## def testCapabilities(self):
## self.prefix = 'foo!bar@baz'
## self.irc.feedMsg(ircmsgs.privmsg(self.irc.nick, 'register foo bar',
## prefix=self.prefix))
## u = ircdb.users.getUser(0)
## u.addCapability('%s.op' % self.channel)
## ircdb.users.setUser(u)
## self.assertNotError(' ')
## self.assertResponse('user capabilities foo', '[]')
## self.assertNotError('channel addcapability foo op')
## self.assertRegexp('channel capabilities foo', 'op')
## self.assertNotError('channel removecapability foo op')
## self.assertResponse('user capabilities foo', '[]')
def testCapabilities(self):
self.assertNotError('channel capability list')
self.assertNotError('channel capability set -foo')
self.assertNotError('channel capability unset -foo')
self.assertError('channel capability unset -foo')
self.assertNotError('channel capability set -foo bar baz')
self.assertRegexp('channel capability list', 'baz')
self.assertNotError('channel capability unset -foo baz')
self.assertError('channel capability unset baz')
def testEnableDisable(self):
self.assertNotRegexp('channel capability list', '-Channel')
self.assertError('channel enable channel')
self.assertNotError('channel disable channel')
self.assertRegexp('channel capability list', '-Channel')
self.assertNotError('channel enable channel')
self.assertNotRegexp('channel capability list', '-Channel')
self.assertNotError('channel disable channel nicks')
self.assertRegexp('channel capability list', '-Channel.nicks')
self.assertNotError('channel enable channel nicks')
self.assertNotRegexp('channel capability list', '-Channel.nicks')
self.assertNotRegexp('channel capability list', 'nicks')
self.assertNotError('channel disable nicks')
self.assertRegexp('channel capability list', 'nicks')
self.assertNotError('channel enable nicks')
self.assertError('channel disable invalidPlugin')
self.assertError('channel disable channel invalidCommand')
def testUnban(self):
self.assertError('unban foo!bar@baz')
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
m = self.getMsg('unban foo!bar@baz')
self.assertEqual(m.command, 'MODE')
self.assertEqual(m.args, (self.channel, '-b', 'foo!bar@baz'))
self.assertNoResponse(' ', 2)
def testErrorsWithoutOps(self):
for s in 'op deop halfop dehalfop voice devoice kick invite'.split():
self.assertError('%s foo' % s)
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('%s foo' % s)
self.irc.feedMsg(ircmsgs.deop(self.channel, self.nick))
def testWontDeItself(self):
for s in 'deop dehalfop devoice'.split():
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertError('%s %s' % (s, self.nick))
def testOp(self):
self.assertError('op')
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('op')
m = self.getMsg('op foo')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+o', 'foo'))
m = self.getMsg('op foo bar')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+o', 'foo'))
m = self.irc.takeMsg()
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+o', 'bar'))
self.irc.state.supported['MODES'] = 2
m = self.getMsg('op foo bar')
try:
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+oo', 'foo', 'bar'))
finally:
self.irc.state.supported['MODES'] = 1
def testHalfOp(self):
self.assertError('halfop')
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('halfop')
m = self.getMsg('halfop foo')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+h', 'foo'))
m = self.getMsg('halfop foo bar')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+h', 'foo'))
m = self.irc.takeMsg()
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+h', 'bar'))
def testVoice(self):
self.assertError('voice')
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('voice')
m = self.getMsg('voice foo')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+v', 'foo'))
m = self.getMsg('voice foo bar')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+v', 'foo'))
m = self.irc.takeMsg()
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+v', 'bar'))
def assertKban(self, query, hostmask, **kwargs):
m = self.getMsg(query, **kwargs)
self.assertEqual(m, ircmsgs.ban(self.channel, hostmask))
m = self.getMsg(' ')
self.assertEqual(m.command, 'KICK')
def assertBan(self, query, hostmask, **kwargs):
m = self.getMsg(query, **kwargs)
self.assertEqual(m, ircmsgs.ban(self.channel, hostmask))
def testIban(self):
self.irc.feedMsg(ircmsgs.join(self.channel,
prefix='foobar!<EMAIL>'))
self.assertError('iban foo!bar@baz')
self.irc.feedMsg(ircmsgs.op(self.channel, self.irc.nick))
self.assertBan('iban foo!bar@baz', 'foo!bar@baz')
self.assertBan('iban foobar', 'foobar!<EMAIL>')
conf.supybot.protocols.irc.strictRfc.setValue(True)
self.assertError('iban $a:nyuszika7h')
self.assertError('unban $a:nyuszika7h')
conf.supybot.protocols.irc.strictRfc.setValue(False)
self.assertBan('iban $a:nyuszika7h', '$a:nyuszika7h')
self.assertNotError('unban $a:nyuszika7h')
## def testKban(self):
## self.irc.prefix = '[email protected]'
## self.irc.nick = 'something'
## self.irc.feedMsg(ircmsgs.join(self.channel,
## prefix='foobar!user@<EMAIL>'))
## self.assertError('kban foobar')
## self.irc.feedMsg(ircmsgs.op(self.channel, self.irc.nick))
## self.assertError('kban foobar -1')
## self.assertKban('kban foobar', '*!*@*.<EMAIL>')
## self.assertKban('kban --exact foobar', 'foobar!<EMAIL>@<EMAIL>')
## self.assertKban('kban --host foobar', '*!*@host.<EMAIL>')
## self.assertKban('kban --user foobar', '*!user@*')
## self.assertKban('kban --nick foobar', 'foobar!*@*')
## self.assertKban('kban --nick --user foobar', 'foobar!user@*')
## self.assertKban('kban --nick --host foobar',
## 'foobar!*@host.<EMAIL>')
## self.assertKban('kban --user --host foobar', '*!user@host.<EMAIL>')
## self.assertKban('kban --nick --user --host foobar',
## 'foobar!<EMAIL>')
## self.assertNotRegexp('kban adlkfajsdlfkjsd', 'KeyError')
## self.assertNotRegexp('kban foobar time', 'ValueError')
## self.assertError('kban %s' % self.irc.nick)
def testBan(self):
with conf.supybot.protocols.irc.banmask.context(['exact']):
self.assertNotError('ban add foo!bar@baz')
self.assertNotError('ban remove foo!bar@baz')
orig = conf.supybot.protocols.irc.strictRfc()
with conf.supybot.protocols.irc.strictRfc.context(True):
# something wonky is going on here. irc.error (src/Channel.py|449)
# is being called but the assert is failing
self.assertError('ban add not!a.hostmask')
self.assertNotRegexp('ban add not!a.hostmask', 'KeyError')
self.assertError('ban add $a:nyuszika7h')
self.assertError('ban remove $a:nyuszika7h')
conf.supybot.protocols.irc.strictRfc.setValue(False)
self.assertNotError('ban add $a:nyuszika7h')
self.assertNotError('ban remove $a:nyuszika7h')
def testBanList(self):
self.assertNotError('ban add foo!bar@baz')
self.assertNotError('ban add foobar!*@baz')
self.assertNotError('ban add foobar!qux@baz')
self.assertRegexp('ban list', r'.*foo!bar@baz.*')
self.assertRegexp('ban list', r'.*foobar!\*@baz.*')
self.assertRegexp('ban list', r'.*foobar!qux@baz.*')
self.assertNotRegexp('ban list foobar!*@baz', r'.*foo!bar@baz.*')
self.assertRegexp('ban list foobar!*@baz', r'.*foobar!\*@baz.*')
self.assertRegexp('ban list foobar!*@baz', r'.*foobar!qux@baz.*')
self.assertResponse('ban list foobar!\*@baz',
'"foobar!*@baz" (never expires)')
def testIgnore(self):
orig = conf.supybot.protocols.irc.banmask()
def ignore(given, expect=None):
if expect is None:
expect = given
self.assertNotError('channel ignore add %s' % given)
self.assertResponse('channel ignore list', "'%s'" % expect)
self.assertNotError('channel ignore remove %s' % expect)
self.assertRegexp('channel ignore list', 'not currently')
ignore('foo!bar@baz', '*!*@baz')
ignore('foo!*@*')
with conf.supybot.protocols.irc.banmask.context(['exact']):
ignore('foo!bar@baz')
ignore('foo!*@*')
self.assertError('ban add not!a.hostmask')
def testNicks(self):
self.assertResponse('channel nicks', 'bar, foo, and test')
self.assertResponse('channel nicks --count', '3')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
``` |
{
"source": "joulez/phabula",
"score": 2
} |
#### File: phabula/tests/test_phabula.py
```python
import unittest
from phabula import phabula
class TestPhabula(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joumaico/coreutil",
"score": 4
} |
#### File: coreutil/algorithm/base.py
```python
def luhn(number: str) -> bool:
"""
Checksum formula to validate a variety of identification numbers.
Return `True` if valid, otherwise `False` if invalid.
"""
addend = ""
for i, j in enumerate(number[::-1]):
# loops starts at the end of the number
if (i + 1) % 2 == 0:
# multiplying by 2 all digits of even index
addend += str(int(j) * 2)
else:
addend += j
if sum(int(x) for x in addend) % 10 == 0:
return True
return False
```
#### File: coreutil/tests/test_card.py
```python
import coreutil
import unittest
class TestCard(unittest.TestCase):
def test_credit(self):
card = coreutil.card.credit('378282246310005')
self.assertTrue(card.isvalid())
self.assertEqual(card.network().short, 'amex')
self.assertEqual(card.network().brand, 'American Express')
``` |
{
"source": "joumanarahime/sqlalchemy-challenge",
"score": 3
} |
#### File: joumanarahime/sqlalchemy-challenge/app.py
```python
import numpy as np
import pandas as dp
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#engine = create_engine("sqlite:///Resources/hawaii.sqlite")
engine = create_engine("sqlite:////Users/joumanarahime/Documents/Vanderbilt BootCamp/sqlalchemy-challenge/Resources/hawaii.sqlite")
Base=automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session=Session(engine)
max_date = session.execute('select MAX(date) from measurement').fetchall()
max_date = max_date[0][0]
# Calculate the date 1 year ago from the last data point in the database
date_stamp = dt.datetime.strptime(max_date,'%Y-%m-%d')
year = date_stamp.year
month = date_stamp.month
day = date_stamp.day
prior_year = f'{year-1}-{month:02d}-{day:02d}'
#Create the app
app = Flask(__name__)
# index route
@app.route("/")
def home():
"""List of all available api routes."""
return (
f"Available Routes:<br/>"
f"------------------------<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/2016-01-30<br/>"
f"/api/v1.0/2016-01-30/2017-01-30"
)
# 4. /api/v1.0/precipitation
@app.route("/api/v1.0/precipitation")
def prec():
session=Session(engine)
results = session.query(Measurement.prcp, Measurement.date).filter(Measurement.date > prior_year).all()
session.close()
prec_data=[]
for result in results:
prec_dict = {result.date: result.prcp }
prec_data.append(prec_dict)
return jsonify(prec_data)
@app.route("/api/v1.0/stations")
def stations():
session=Session(engine)
results= session.execute('select station, count(*) as count from measurement group by station order by count(station) desc ').fetchall()
station_data=[]
for result in results:
station_dict = {result.station: result.count}
station_data.append(station_dict)
return jsonify(station_data)
@app.route("/api/v1.0/tobs")
def tobs():
session=Session(engine)
cal_temp = session.execute(f"select date, min(tobs), avg(tobs), max(tobs) from measurement where date> '{prior_year}'").fetchall()
temp_dict= {
"Date": cal_temp[0][0],
"Low Temp": cal_temp[0][1],
"Avg Temp": cal_temp[0][2],
"Highest Temp": cal_temp[0][3]
}
return jsonify(temp_dict)
@app.route("/api/v1.0/<start>")
def start_date(start):
session=Session(engine)
sel= [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
results= (session.query(*sel)
.filter(func.strftime("%Y-%m-%d", Measurement.date)>=start)
.all())
dates=[]
for result in results:
start_dict={
"Date": start,
"Low Temp": result[0],
"Avg Temp": result[1],
"Highest Temp": result[2]
}
dates.append(start_dict)
return jsonify(dates)
@app.route("/api/v1.0/<start>/<end>")
def start_end_date(start, end):
session=Session(engine)
sel= [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
results= (session.query(*sel)
.filter(func.strftime("%Y-%m-%d", Measurement.date)>=start)
.filter(func.strftime("%Y-%m-%d", Measurement.date)<=end)
.all())
dates=[]
for result in results:
        startEnd_dict={
"Start Date": start,
"End Date": end,
"Low Temp": result[0],
"Avg Temp": result[1],
"Highest Temp": result[2]
}
dates.append(startEnd_dict)
return jsonify(dates)
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "jounaidr/apel",
"score": 2
} |
#### File: apel/bin/summariser.py
```python
from optparse import OptionParser
import ConfigParser
import logging.config
import os
import sys
import time
from apel.db import ApelDb, ApelDbException
from apel.common import set_up_logging, LOG_BREAK
from apel import __version__
def runprocess(db_config_file, config_file, log_config_file):
'''Parse the configuration file, connect to the database and run the
summarising process.'''
# Store start time to log how long the summarising process takes
start_time = time.time()
try:
# Read configuration from file
cp = ConfigParser.ConfigParser()
cp.read(config_file)
pidfile = cp.get('summariser', 'pidfile')
dbcp = ConfigParser.ConfigParser()
dbcp.read(db_config_file)
db_backend = dbcp.get('db', 'backend')
db_hostname = dbcp.get('db', 'hostname')
db_port = int(dbcp.get('db', 'port'))
db_name = dbcp.get('db', 'name')
db_username = dbcp.get('db', 'username')
db_password = dbcp.get('db', 'password')
except (ConfigParser.Error, ValueError, IOError), err:
print 'Error in configuration file %s: %s' % (config_file, str(err))
print 'The system will exit.'
sys.exit(1)
try:
db_type = dbcp.get('db', 'type')
except ConfigParser.Error:
db_type = 'cpu'
# set up logging
try:
if os.path.exists(log_config_file):
logging.config.fileConfig(log_config_file)
else:
set_up_logging(cp.get('logging', 'logfile'),
cp.get('logging', 'level'),
cp.getboolean('logging', 'console'))
log = logging.getLogger('summariser')
except (ConfigParser.Error, ValueError, IOError), err:
print 'Error configuring logging: %s' % str(err)
print 'The system will exit.'
sys.exit(1)
log.info('Starting apel summariser version %s.%s.%s', *__version__)
# If the pidfile exists, don't start up.
try:
if os.path.exists(pidfile):
log.error("A pidfile %s already exists.", pidfile)
log.warn("Check that the summariser is not running, then remove the file.")
raise Exception("The summariser cannot start while pidfile exists.")
except Exception, err:
print "Error initialising summariser: %s" % err
sys.exit(1)
try:
f = open(pidfile, "w")
f.write(str(os.getpid()))
f.write("\n")
f.close()
except IOError, e:
log.warn("Failed to create pidfile %s: %s", pidfile, e)
# If we fail to create a pidfile, don't start the summariser
sys.exit(1)
log.info('Created Pidfile')
# Log into the database
try:
log.info('Connecting to the database ... ')
db = ApelDb(db_backend, db_hostname, db_port, db_username, db_password, db_name)
log.info('Connected.')
# This is all the summarising logic, contained in ApelMysqlDb() and the stored procedures.
if db_type == 'cpu':
# Make sure that records are not coming from the same site by two different routes
# N.B. Disabled as check doesn't scale well and isn't that useful.
# db.check_duplicate_sites()
db.summarise_jobs()
db.normalise_summaries()
db.copy_summaries()
elif db_type == 'cloud':
db.summarise_cloud()
else:
raise ApelDbException('Unknown database type: %s' % db_type)
# Calculate end time to output time to logs
elapsed_time = round(time.time() - start_time, 3)
log.info('Summarising completed in: %s seconds', elapsed_time)
log.info(LOG_BREAK)
except ApelDbException, err:
log.error('Error summarising: ' + str(err))
log.error('Summarising has been cancelled.')
sys.exit(1)
finally:
        # Clean up pidfile regardless of any exceptions
# This even executes if sys.exit() is called
log.info('Removing Pidfile')
try:
if os.path.exists(pidfile):
os.remove(pidfile)
else:
log.warn("pidfile %s not found.", pidfile)
except IOError, e:
log.warn("Failed to remove pidfile %s: %s", pidfile, e)
log.warn("The summariser may not start again until it is removed.")
log.info(LOG_BREAK)
if __name__ == '__main__':
# Main method for running the summariser.
ver = "APEL summariser %s.%s.%s" % __version__
opt_parser = OptionParser(description=__doc__, version=ver)
opt_parser.add_option('-d', '--db', help='the location of database config file',
default='/etc/apel/db.cfg')
opt_parser.add_option('-c', '--config', help='the location of config file',
default='/etc/apel/summariser.cfg')
opt_parser.add_option('-l', '--log_config', help='Location of logging config file (optional)',
default='/etc/apel/logging.cfg')
(options,args) = opt_parser.parse_args()
runprocess(options.db, options.config, options.log_config)
``` |
{
"source": "jounderwood/aiohttp-ws",
"score": 2
} |
#### File: aiohttp-ws/aiohttp_ws/__init__.py
```python
from .service import Service
from .actions_registry import ActionsRegistry
from .messages import send, send_directly, send_pubsub, broadcast
def aiohttp_ws():
return Service()
```
#### File: aiohttp-ws/aiohttp_ws/settings.py
```python
from yzconfig import YzConfig
class Settings(YzConfig):
PUBSUB_CHANNEL_NAME = 'aiohttp_ws'
HEARTBEAT = 30
AUTOPING = True
CAMEL_CASE_TRANSFORM = True
class RedisSettings(YzConfig):
HOST = None
PORT = 6379
DB = 0
MIN_POOL_SIZE = 1
MAX_POOL_SIZE = 10
def _check_settings(self):
assert self.HOST, "REDIS_HOST is empty"
settings = Settings('AIOHTTP_WS_')
redis_settings = RedisSettings('AIOHTTP_WS_REDIS_')
``` |
{
"source": "joungh93/PyRAF_GMOS_IFU",
"score": 3
} |
#### File: PyRAF_GMOS_IFU/analysis/linefit.py
```python
import numpy as np
import copy
from astropy.convolution import convolve
from astropy.convolution import Gaussian1DKernel
from scipy.special import erf
from scipy.stats import sigmaclip
from scipy.optimize import minimize
import emcee
import pandas as pd
import warnings
from astropy.cosmology import FlatLambdaCDM
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from scipy.stats import truncnorm
from scipy.stats import skew
from scipy.stats import kurtosis
from scipy.optimize import curve_fit
# ----- Function ----- #
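# Pixel-integrated Gaussian profile: the flux landing in each pixel of width dx
# is the difference of the Gaussian CDF (written with erf) at the pixel edges,
# normalized so that the profile sums to flux_scale.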
def gauss_cdf_scale(x, mu, sigma, flux_scale):
dx = x[1] - x[0]
v1 = erf((x-mu+0.5*dx)/(np.sqrt(2.0)*sigma))
v2 = erf((x-mu-0.5*dx)/(np.sqrt(2.0)*sigma))
return flux_scale*(v1-v2)/(2.0*dx)
# ----- Class ----- #
class linefit:
def __init__(self, wavelength, binned_spectrum, binned_variance, binned_continuum,
line_numbers, redshift, dir_lines,
broad_component=False, data_vbin=None, data_bfac=None):#, data_gaussian=None):
'''
wavelength :
A wavelength data (1D array) on the observer-frame.
The array should be the data type of (n_wavelength,).
binned_spectrum :
A spectrum data (2D array) after running voronoi 2D binning.
The array should be the data type of (n_wavelength, n_bin).
binned_variance :
A variance data (2D array) after running voronoi 2D binning.
The array should be the data type of (n_wavelength, n_bin).
binned_continuum :
A continuum data (2D array) after running voronoi 2D binning & continuum fitting.
The array should be the data typde of (n_wavelength, n_bin).
line_numbers :
A number of line declaration
0 : [OII]3727/3729 line
1 : H beta line
2 : [OIII]4959/5007 line
3 : H alpha + [NII]6548/6584 line
4 : [SII]6717/6731 line
5 : [OI]6300 line
'''
# Basic settings
cosmo = FlatLambdaCDM(H0=70.0, Om0=0.3, Tcmb0=2.725)
self.dir_lines = dir_lines
self.redshift = redshift
self.lumdist = cosmo.luminosity_distance(self.redshift).value * 1.0e+6 # pc
self.c = 2.99792e+5 # km/s
warnings.filterwarnings("ignore", category=RuntimeWarning)
# Reading the results of the integrated spectra
fit_itg = np.genfromtxt('linefit_integrated.txt', dtype=None, encoding='ascii', comments='#',
names=('line','mu','e_mu','lsig','e_lsig','vsig','e_vsig',
'R','e_R','flux','e_flux','rchisq'))
self.fit_itg = fit_itg
if broad_component:
self.data_vbin = data_vbin
self.data_bfac = data_bfac
# self.data_gaussian = data_gaussian
fit_itgb = np.genfromtxt('linefit_integrated_broad.txt', dtype=None, encoding='ascii', comments='#',
names=('line','mu','e_mu','lsig','e_lsig','vsig','e_vsig',
'R','e_R','flux','e_flux','rchisq','flxsum_scale'))
self.fit_itgb = fit_itgb
# Line declarations
self.line_num = line_numbers
if (line_numbers == 0):
lname = ['OII3727']
self.nlines = len(lname)
self.line_names = lname
self.line_wav = [fit_itg['mu'][fit_itg['line'] == l].item() for l in lname]
self.wav_fit = [self.line_wav[0]-10.0, self.line_wav[-1]+10.0]
if (line_numbers == 1):
lname = ['Hbeta']
self.nlines = len(lname)
self.line_names = lname
self.line_wav = [fit_itg['mu'][fit_itg['line'] == l].item() for l in lname]
if broad_component:
self.wav_fit = [self.line_wav[0]-50.0, self.line_wav[-1]+50.0]
else:
self.wav_fit = [self.line_wav[0]-10.0, self.line_wav[-1]+10.0]
if (line_numbers == 2):
lname = ['OIII4959', 'OIII5007']
self.nlines = len(lname)
self.line_names = lname
self.line_wav = [fit_itg['mu'][fit_itg['line'] == l].item() for l in lname]
if broad_component:
self.wav_fit = [self.line_wav[0]-20.0, self.line_wav[-1]+30.0]
else:
self.wav_fit = [self.line_wav[0]-10.0, self.line_wav[-1]+10.0]
if (line_numbers == 3):
lname = ['NII6548', 'Halpha', 'NII6584']
self.nlines = len(lname)
self.line_names = lname
self.line_wav = [fit_itg['mu'][fit_itg['line'] == l].item() for l in lname]
if broad_component:
self.wav_fit = [self.line_wav[0]-50.0, self.line_wav[-1]+40.0]
else:
self.wav_fit = [self.line_wav[0]-10.0, self.line_wav[-1]+10.0]
if (line_numbers == 4):
lname = ['SII6717', 'SII6731']
self.nlines = len(lname)
self.line_names = lname
self.line_wav = [fit_itg['mu'][fit_itg['line'] == l].item() for l in lname]
self.wav_fit = [self.line_wav[0]-10.0, self.line_wav[-1]+10.0]
if (line_numbers == 5):
lname = ['OI6300']
self.nlines = len(lname)
self.line_names = lname
self.line_wav = [fit_itg['mu'][fit_itg['line'] == l].item() for l in lname]
self.wav_fit = [self.line_wav[0]-10.0, self.line_wav[-1]+10.0]
# Data
self.wav_obs = wavelength
self.wav_res = self.wav_obs / (1.0+self.redshift)
self.spx_fit = [np.abs(self.wav_res-self.wav_fit[0]).argmin(),
np.abs(self.wav_res-self.wav_fit[1]).argmin()]
self.nbin = binned_spectrum.shape[1]
# Continuum subtraction
data0 = binned_spectrum - binned_continuum
vari0 = binned_variance
cont0 = binned_continuum
self.dat = data0 * (1.0+self.redshift)
self.var = vari0 * (1.0+self.redshift)**2.0
self.cont = cont0 * (1.0+self.redshift)
# Reading the spectral resolution fitting results
par, e_par = np.loadtxt('relation_wav_R.txt').T
self.par = par
self.e_par = e_par
# Velocity sigma range
Rmax = self.par[0] + self.par[1]*self.wav_obs[self.spx_fit[1]]
e_Rmax = np.sqrt(self.e_par[0]**2.0 + (self.e_par[1]*self.wav_obs[self.spx_fit[1]])**2.0)
vsig0 = self.c / (2.0*np.sqrt(2.0*np.log(2.0))*Rmax)
e_vsig0 = self.c*e_Rmax / (2.0*np.sqrt(2.0*np.log(2.0))*Rmax*Rmax)
self.vsig0 = vsig0
self.vsig_llim = 0.5*self.vsig0
if broad_component:
self.vsig_ulim = 5.0*self.vsig0
else:
self.vsig_ulim = 2.0*self.vsig0
def model_func(self, theta, x):
dx = x[1]-x[0]
val = 0.
for i in np.arange(self.nlines):
v1 = erf((x-theta[2*i+1]+0.5*dx)/(np.sqrt(2.0)*theta[0]))
v2 = erf((x-theta[2*i+1]-0.5*dx)/(np.sqrt(2.0)*theta[0]))
val += theta[2*i+2]*(v1-v2)/(2.0*dx)
return val
def log_likelihood(self, theta, x, y, yerr):
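        # Standard Gaussian log-likelihood of the data given the multi-line model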
mod = self.model_func(theta, x)
sigma2 = yerr**2
return -0.5 * np.sum((y-mod)**2 / sigma2 + np.log(2*np.pi*sigma2))
def log_prior(self, theta, ibin):
# Basic conditions
icnd = 0
sig_cnd = ((theta[0] > self.vsig_llim/self.c) & (theta[0] < self.vsig_ulim/self.c))
icnd += 1*sig_cnd
spec_sum = np.sum(np.abs(self.dat[self.spx_fit[0]:self.spx_fit[1]+1, ibin])* \
(self.wav_res[1]-self.wav_res[0]))
for i in np.arange(self.nlines):
mu_cnd = ((theta[2*i+1] > np.log(self.wav_fit[0])) & \
(theta[2*i+1] < np.log(self.wav_fit[1])))
flx_cnd = ((theta[2*i+2] > 0.) & \
(theta[2*i+2] < 2.0*spec_sum))
icnd += (1*mu_cnd + 1*flx_cnd)
if (icnd == 2*self.nlines+1):
return_value = 0.
else:
return_value = -np.inf
# Specific conditions
gauss_pdf = lambda X, M, S: np.exp(-0.5*((X-M)/S)**2.)/(S*np.sqrt(2.*np.pi))
# Line 0, 1, 5: [OII]3727/3729, H beta, [OI]6300 (# of parameters = 3)
if (self.line_num in [0, 1, 5]):
vsig_init = self.fit_itg['vsig'][self.fit_itg['line'] == self.line_names[0]].item()
e_vsig_init = self.fit_itg['e_vsig'][self.fit_itg['line'] == self.line_names[0]].item()
fprior_sigma = np.log(gauss_pdf(theta[0], vsig_init/self.c, 10./self.c))
mu_init = self.fit_itg['mu'][self.fit_itg['line'] == self.line_names[0]].item()
e_mu_init = self.fit_itg['e_mu'][self.fit_itg['line'] == self.line_names[0]].item()
fprior_mu = np.log(gauss_pdf(theta[1], np.log(mu_init), 0.5/mu_init))
fprior_flx = 0.
# Line 2: [OIII]4959/5007 (# of parameters = 5)
if (self.line_num == 2):
vsig_init = self.fit_itg['vsig'][self.fit_itg['line'] == self.line_names[1]].item()
e_vsig_init = self.fit_itg['e_vsig'][self.fit_itg['line'] == self.line_names[1]].item()
fprior_sigma = np.log(gauss_pdf(theta[0], vsig_init/self.c, 10./self.c))
fprior_mu = 0.
mu_init_arr = np.array([])
for j in np.arange(self.nlines):
mu_init = self.fit_itg['mu'][self.fit_itg['line'] == self.line_names[j]].item()
e_mu_init = self.fit_itg['e_mu'][self.fit_itg['line'] == self.line_names[j]].item()
fprior_mu += np.log(gauss_pdf(theta[2*j+1], np.log(mu_init), 0.5/mu_init))
mu_init_arr = np.append(mu_init_arr, mu_init)
fprior_mu += np.log(gauss_pdf(theta[3]-theta[1], np.log(mu_init_arr[1]/mu_init_arr[0]),
0.01*np.log(mu_init_arr[1]/mu_init_arr[0])))
flx2_cnd = (theta[4]/theta[2] > 1.)
if flx2_cnd:
fprior_flx = np.log(gauss_pdf(theta[4]/theta[2], 3.0, 0.1)) # prior function for flux ratio
else:
fprior_flx = -np.inf
# Line 3: H alpha + [NII]6548/6584 (# of parameters = 7)
if (self.line_num == 3):
vsig_init = self.fit_itg['vsig'][self.fit_itg['line'] == self.line_names[1]].item()
e_vsig_init = self.fit_itg['e_vsig'][self.fit_itg['line'] == self.line_names[1]].item()
fprior_sigma = np.log(gauss_pdf(theta[0], vsig_init/self.c, 10./self.c))
fprior_mu = 0.
mu_init_arr = np.array([])
for j in np.arange(self.nlines):
mu_init = self.fit_itg['mu'][self.fit_itg['line'] == self.line_names[j]].item()
e_mu_init = self.fit_itg['e_mu'][self.fit_itg['line'] == self.line_names[j]].item()
fprior_mu += np.log(gauss_pdf(theta[2*j+1], np.log(mu_init), 0.5/mu_init))
mu_init_arr = np.append(mu_init_arr, mu_init)
fprior_mu += np.log(gauss_pdf(theta[3]-theta[1], np.log(mu_init_arr[1]/mu_init_arr[0]),
0.01*np.log(mu_init_arr[1]/mu_init_arr[0])))
fprior_mu += np.log(gauss_pdf(theta[5]-theta[3], np.log(mu_init_arr[2]/mu_init_arr[1]),
0.01*np.log(mu_init_arr[2]/mu_init_arr[1])))
flx2_cnd = ((theta[4]/theta[2] > 1.) & (theta[4]/theta[6] > 1.) & \
(theta[6]/theta[2] > 1.))
if flx2_cnd:
fprior_flx = np.log(gauss_pdf(theta[6]/theta[2], 3.0, 0.1)) # prior function for flux ratio
else:
fprior_flx = -np.inf
# Line 4: [SII]6717/6731 (# of parameters = 5)
if (self.line_num == 4):
vsig_init = self.fit_itg['vsig'][self.fit_itg['line'] == self.line_names[0]].item()
e_vsig_init = self.fit_itg['e_vsig'][self.fit_itg['line'] == self.line_names[0]].item()
fprior_sigma = np.log(gauss_pdf(theta[0], vsig_init/self.c, 10./self.c))
fprior_mu = 0.
mu_init_arr = np.array([])
for j in np.arange(self.nlines):
mu_init = self.fit_itg['mu'][self.fit_itg['line'] == self.line_names[j]].item()
e_mu_init = self.fit_itg['e_mu'][self.fit_itg['line'] == self.line_names[j]].item()
fprior_mu += np.log(gauss_pdf(theta[2*j+1], np.log(mu_init), 0.5/mu_init))
mu_init_arr = np.append(mu_init_arr, mu_init)
fprior_mu += np.log(gauss_pdf(theta[3]-theta[1], np.log(mu_init_arr[1]/mu_init_arr[0]),
0.01*np.log(mu_init_arr[1]/mu_init_arr[0])))
fprior_flx = 0.
return_value += (fprior_sigma + fprior_mu + fprior_flx)
return return_value
def solve(self, ibin, check=False, nwalkers=64, ndiscard=5000, nsample=5000,
fluct0=1.0e-3, fluct1=1.0e-3, fluct2=1.0e-3, broad_component=False):
ndim = 2*self.nlines+1
# Initial settings
nll = lambda *args: -self.log_likelihood(*args)
# for i in np.arange(self.nbin):
Xfit = self.wav_res[self.spx_fit[0]:self.spx_fit[1]+1]
Yfit = self.dat[self.spx_fit[0]:self.spx_fit[1]+1, ibin]
e_Yfit = np.sqrt(self.var[self.spx_fit[0]:self.spx_fit[1]+1, ibin])
spec_sum = np.sum(np.abs(Yfit)*(self.wav_res[1]-self.wav_res[0]))
# Broad component subtraction
if broad_component:
broad_sum = np.zeros_like(Yfit)
bfac = self.data_bfac[self.data_vbin == ibin][0]
if (bfac < 0.01):
broad_sum += 0.
elif (self.fit_itgb.size == 1):
bline = self.fit_itgb['line'].item()
if (self.line_num == int(bline[5])):
bpar = [self.fit_itgb['mu'].item(), self.fit_itgb['lsig'].item(),
bfac*self.fit_itgb['flux'].item()]
broad_sum += gauss_cdf_scale(Xfit, *bpar)
else:
broad_sum += 0.
else:
bline = self.fit_itgb['line']
bool_broad = pd.Series(bline).str.startswith(f"Broad{self.line_num:d}_").values
nbroad = np.sum(bool_broad)
if (nbroad == 0):
broad_sum += 0.
else:
for b in np.arange(nbroad):
comp = (bline == f"Broad{self.line_num:d}_{b+1:d}")
bpar = [self.fit_itgb['mu'][comp].item(), self.fit_itgb['lsig'][comp].item(),
bfac*self.fit_itgb['flux'][comp].item()]
broad_sum += gauss_cdf_scale(Xfit, *bpar)
Yfit = self.dat[self.spx_fit[0]:self.spx_fit[1]+1, ibin] - broad_sum
Xfit2 = np.log(Xfit)
Yfit2 = Xfit * Yfit
e_Yfit2 = Xfit * e_Yfit
# Finding the initial guess
initial = np.zeros(ndim)
initial[0] = self.vsig0 / self.c
for j in np.arange(self.nlines):
initial[2*j+1] = np.log(self.fit_itg['mu'][self.fit_itg['line'] == self.line_names[j]].item())
initial[2*j+2] = spec_sum
# Running MCMC
log_posterior = lambda theta, x, y, yerr: self.log_prior(theta, ibin) + self.log_likelihood(theta, x, y, yerr)
pos = np.zeros((nwalkers, ndim))
np.random.seed(0)
pos[:,0] = truncnorm.rvs((self.vsig_llim/self.c - initial[0]) / fluct0,
(self.vsig_ulim/self.c - initial[0]) / fluct0,
loc=initial[0], scale=fluct0, size=nwalkers)
for i in np.arange(1, ndim, 1):
if (i % 2 == 1):
pos[:,i] = truncnorm.rvs((np.log(self.wav_fit[0]) - initial[i]) / fluct1,
(np.log(self.wav_fit[1]) - initial[i]) / fluct1,
loc=initial[i], scale=fluct1, size=nwalkers)
elif (i % 2 == 0):
pos[:,i] = truncnorm.rvs((0. - initial[i]) / fluct2,
(2.0*spec_sum - initial[i]) / fluct2,
loc=initial[i], scale=fluct2, size=nwalkers)
# print(pos)
# pos = soln.x + fluct*np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior,
args=(Xfit2, Yfit2, e_Yfit2))
sampler.run_mcmc(pos, ndiscard+nsample, progress=True)
flat_samples = sampler.get_chain(discard=ndiscard, flat=True)
popt, perr, labels, g1, k1 = [], [], [], [], []
for j in np.arange(ndim):
mcmc = np.percentile(flat_samples[:, j],
[50-34.15, 50, 50+34.15])
popt.append(mcmc[1])
perr.append(0.5*(mcmc[2]-mcmc[0]))
labels.append(f'par{j+1:d}')
g1.append(skew(flat_samples[:, j]))
k1.append(kurtosis(flat_samples[:, j]))
popt = np.array(popt)
perr = np.array(perr)
g1 = np.array(g1)
k1 = np.array(k1)
popt2 = []
popt2.append(popt[0]*np.median(Xfit))
for j in 1+np.arange(ndim-1):
if (j % 2 == 1):
popt2.append(np.exp(popt[j]))
else:
popt2.append(popt[j])
popt2 = np.array(popt2)
if check:
# Histogram plot
fig = plt.figure(1, figsize=(10,10))
gs = GridSpec(3, 3, left=0.05, bottom=0.05, right=0.975, top=0.975,
height_ratios=[1.]*3, width_ratios=[1.]*3,
hspace=0.15, wspace=0.15)
for k in np.arange(ndim):
ax = fig.add_subplot(gs[k // 3, k % 3])
Y = flat_samples[:, k]
ax.hist(Y, bins=20, histtype='step', linewidth=2.5)
ax.tick_params(axis='both', labelsize=14.0)
ax.tick_params(labelleft=False)
ax.axvline(popt[k], color='k', linestyle='--', linewidth=2.5, alpha=0.7)
ax.text(0.05, 0.95, f"g1 = {g1[k]:.2f}", fontsize=16.0, fontweight='bold',
color='red', ha='left', va='top', transform=ax.transAxes)
ax.text(0.05, 0.85, f"k1 = {k1[k]:.2f}", fontsize=16.0, fontweight='bold',
color='green', ha='left', va='top', transform=ax.transAxes)
plt.savefig(self.dir_lines+f"check/line{self.line_num:d}_bin{ibin:d}.png", dpi=300)
plt.close()
# Spectra plot
fig = plt.figure(2, figsize=(12,9))
ax = fig.add_subplot(111)
ax.set_position([0.15,0.15,0.80,0.80])
ax.tick_params(axis='both', labelsize=20.0)
ax.set_xlim([Xfit[0]-25.0, Xfit[-1]+25.0])
Ydat = self.dat[self.spx_fit[0]:self.spx_fit[1]+1, ibin]
ax.set_ylim([np.min(Ydat)-1.0*np.abs(np.max(Ydat)), 1.25*np.abs(np.max(Ydat))])
ax.set_xlabel(r"Rest-frame wavelength [${\rm \AA}$]", fontsize=20.0)
ax.set_ylabel(r"Flux [${\rm 10^{-15}~erg~cm^{-2}~s^{-1}~\AA^{-1}}$]", fontsize=20.0)
ax.plot(self.wav_res, self.dat[:, ibin], linewidth=3.0, alpha=0.7)
ax.plot(self.wav_res, self.model_func(popt2, self.wav_res), linewidth=3.0, alpha=0.7)
resi = -0.7*np.abs(np.max(Ydat))+self.dat[:, ibin]-self.model_func(popt2, self.wav_res)
if broad_component:
broad_sum_totwav = np.zeros_like(self.wav_res)
if (self.fit_itgb.size == 1):
broad_totwav = gauss_cdf_scale(self.wav_res, self.fit_itgb['mu'].item(),
self.fit_itgb['lsig'].item(), bfac*self.fit_itgb['flux'].item())
if ((self.fit_itgb['mu'].item() > Xfit[0]-25.0) & (self.fit_itgb['mu'].item() < Xfit[-1]+25.0)):
ax.plot(self.wav_res, broad_totwav, linewidth=2.5, linestyle='--', color='red', alpha=0.6)
broad_sum_totwav += broad_totwav
else:
for b in np.arange(self.fit_itgb.size):
broad_totwav = gauss_cdf_scale(self.wav_res, self.fit_itgb['mu'][b],
self.fit_itgb['lsig'][b], self.fit_itgb['flux'][b]*bfac)
if ((self.fit_itgb['mu'][b] > Xfit[0]-25.0) & (self.fit_itgb['mu'][b] < Xfit[-1]+25.0)):
ax.plot(self.wav_res, broad_totwav, linewidth=2.5, linestyle='--', color='red', alpha=0.6)
broad_sum_totwav += broad_totwav
resi -= broad_sum_totwav
ax.plot(self.wav_res, resi, linewidth=2.5, color='green', alpha=0.6)
plt.savefig(self.dir_lines+f"check/line{self.line_num:d}_fit_bin{ibin:d}.png", dpi=300)
plt.close()
vsig = self.c*popt[0]
e_vsig = self.c*perr[0]
snr = popt[2::2] / perr[2::2]
Ymod = self.model_func(popt, np.log(self.wav_res))[self.spx_fit[0]:self.spx_fit[1]+1]
rchisq = []
for j in np.arange(self.nlines):
spx_line = [np.abs(Xfit2-(popt[1::2][j]-3*popt[0])).argmin(),
np.abs(Xfit2-(popt[1::2][j]+3*popt[0])).argmin()]
chisq = ((Yfit2-Ymod)/e_Yfit2)**2.
dof = len(Yfit2[spx_line[0]:spx_line[1]+1])-3
rchisq.append(np.sum(chisq[spx_line[0]:spx_line[1]+1]) / dof)
# rchisq = np.sum(((Yfit-Ymod)/e_Yfit)**2.) / (len(Yfit)-ndim)
if broad_component:
broad = bfac
else:
broad = 0.0
df = pd.DataFrame(data = {'line': self.line_names,
'mu': np.exp(popt[1::2]),#popt[0::3],
'e_mu': np.exp(popt[1::2])*perr[1::2],#perr[0::3],
'g1_mu': g1[1::2],
'k1_mu': k1[1::2],
'sigma': [popt[0]]*len(self.line_names),#popt[1::3],
'e_sigma': [perr[0]]*len(self.line_names),#perr[1::3],
'g1_sigma': [g1[0]]*len(self.line_names),
'k1_sigma': [k1[0]]*len(self.line_names),
'flux': popt[2::2],
'e_flux': perr[2::2],
'g1_flux': g1[2::2],
'k1_flux': k1[2::2],
'vsig': vsig,
'e_vsig': e_vsig,
'snr': snr,
'rchisq': rchisq,
'broad': broad})
return df
if (__name__ == '__main__'):
import numpy as np
import glob, os
from matplotlib import pyplot as plt
from astropy.io import fits
# ----- Basic parameters ----- #
redshift = 0.3424
dir_vbin = 'vorbin/'
dir_lines = 'lines3/'
    os.makedirs(dir_lines, exist_ok=True)
    os.makedirs(dir_lines+"check/", exist_ok=True)
# ----- Loading Voronoi binned data ----- #
vb = np.load(dir_vbin+'vorbin_array.npz')
# wav, sci, var
data_vbin = fits.getdata(dir_vbin+'vbin.fits').astype('int')
nvbin = np.unique(data_vbin).size-1
data_bfac = fits.getdata('bfac_2D.fits')
l0 = linefit(vb['wav'], vb['sci'], vb['var'], vb['cont'], 0, redshift, dir_lines)
l1 = linefit(vb['wav'], vb['sci'], vb['var'], vb['cont'], 1, redshift, dir_lines,
broad_component=True, data_vbin=data_vbin, data_bfac=data_bfac)
l2 = linefit(vb['wav'], vb['sci'], vb['var'], vb['cont'], 2, redshift, dir_lines)
l3 = linefit(vb['wav'], vb['sci'], vb['var'], vb['cont'], 3, redshift, dir_lines,
broad_component=True, data_vbin=data_vbin, data_bfac=data_bfac)
l4 = linefit(vb['wav'], vb['sci'], vb['var'], vb['cont'], 4, redshift, dir_lines)
test_ibin = [0]
for ibin in test_ibin:
df0 = l0.solve(ibin, check=True, nwalkers=50,
ndiscard=1000, nsample=1000,
fluct0=1.0e-6, fluct1=1.0e-7, fluct2=1.0e-4)
theta0 = df0['sigma'].values[0]
for ln in np.arange(l0.nlines):
theta0 = np.append(theta0, np.log(df0['mu'].values[ln]))
theta0 = np.append(theta0, df0['flux'].values[ln])
print(l0.log_prior(theta0, ibin))
# df1 = l1.solve(ibin, check=True, nwalkers=50,
# ndiscard=1000, nsample=1000,
# fluct0=1.0e-7, fluct1=1.0e-7, fluct2=1.0e-4, broad_component=True)
# theta1 = df1['sigma'].values[0]
# for ln in np.arange(l1.nlines):
# theta1 = np.append(theta1, np.log(df1['mu'].values[ln]))
# theta1 = np.append(theta1, df1['flux'].values[ln])
# print(l1.log_prior(theta1, ibin))
# df2 = l2.solve(ibin, check=True, nwalkers=50,
# ndiscard=1000, nsample=1000,
# fluct0=1.0e-6, fluct1=1.0e-7, fluct2=1.0e-4)
# theta2 = df2['sigma'].values[0]
# for ln in np.arange(l2.nlines):
# theta2 = np.append(theta2, np.log(df2['mu'].values[ln]))
# theta2 = np.append(theta2, df2['flux'].values[ln])
# print(l2.log_prior(theta2, ibin))
df3 = l3.solve(ibin, check=True, nwalkers=50,
ndiscard=1000, nsample=1000,
fluct0=1.0e-6, fluct1=1.0e-7, fluct2=1.0e-4, broad_component=True)
theta3 = df3['sigma'].values[0]
for ln in np.arange(l3.nlines):
theta3 = np.append(theta3, np.log(df3['mu'].values[ln]))
theta3 = np.append(theta3, df3['flux'].values[ln])
print(l3.log_prior(theta3, ibin))
# df4 = l4.solve(ibin, check=True, nwalkers=50,
# ndiscard=1000, nsample=1000,
# fluct0=1.0e-6, fluct1=1.0e-7, fluct2=1.0e-4)
# theta4 = df4['sigma'].values[0]
# for ln in np.arange(l4.nlines):
# theta4 = np.append(theta4, np.log(df4['mu'].values[ln]))
# theta4 = np.append(theta4, df4['flux'].values[ln])
# print(l4.log_prior(theta4, ibin))
```
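The `model_func` above is a pixel-integrated Gaussian: each erf difference is the Gaussian CDF change across a pixel's edges, divided by the pixel width, so summing the profile over the grid recovers the line flux. A minimal standalone sketch of that profile (the line center, width, and flux below are hypothetical values, not from the fit):
```python
import numpy as np
from scipy.special import erf

def pixel_integrated_gaussian(x, mu, sigma, flux):
    # Mean flux density per pixel: CDF difference across the pixel
    # edges divided by the pixel width (same form as model_func).
    dx = x[1] - x[0]
    v1 = erf((x - mu + 0.5*dx) / (np.sqrt(2.0)*sigma))
    v2 = erf((x - mu - 0.5*dx) / (np.sqrt(2.0)*sigma))
    return flux * (v1 - v2) / (2.0*dx)

x = np.linspace(6500.0, 6600.0, 1001)  # hypothetical wavelength grid [A]
y = pixel_integrated_gaussian(x, mu=6563.0, sigma=2.0, flux=1.5)
print(y.sum() * (x[1] - x[0]))         # ~1.5: the input flux is recovered
```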
#### File: PyRAF_GMOS_IFU/analysis/plt1_contour.py
```python
import time
start_time = time.time()
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import copy
from astropy.io import fits
from astropy import wcs
import imgscl
from scipy import ndimage
from skimage.registration import phase_cross_correlation
from astropy.cosmology import FlatLambdaCDM
import init_cfg as ic
# ----- Loading the original HST image ----- #
# Directories
dir_fig = ic.cpath+"diagram/linefits/"
diI = "/data/jlee/DATA/HLA/McPartland+16/MACS0916/JFG1/Phot/"
diG = ic.dir_redux
# Reading central RA & Dec
hdr1 = fits.getheader(ic.cube_list[0], ext=0)
gra, gdec, gpa = hdr1['RA'], hdr1['DEC'], hdr1['PA']
# Reading FITS images and creating RGB data
#img435 = fits.getdata(diI+'alw435NE_drc.fits')
img606 = fits.getdata(diI+'606_ori.fits', ext=0)
img814 = fits.getdata(diI+'814_ori.fits', ext=0)
cimg = np.zeros((img814.shape[0], img814.shape[1], 3), dtype=float)
cimg[:,:,0] = imgscl.linear(0.5*img814, scale_min=-0.02, scale_max=0.075) # R
cimg[:,:,1] = imgscl.linear(0.5*(img606+img814), scale_min=-0.02, scale_max=0.15) # G
cimg[:,:,2] = imgscl.linear(0.5*img606, scale_min=-0.02, scale_max=0.075) # B
# WCS to XY
h = fits.getheader(diI+'606_ori.fits', ext=0)
w = wcs.WCS(h)
px, py = w.wcs_world2pix(gra, gdec, 1)
# Color map
rth = 150.0
img = cimg[int(np.round(py)-1-rth):int(np.round(py)-1+rth),
int(np.round(px)-1-rth):int(np.round(px)-1+rth),:]
# ----- Rotating the image ----- #
rotated_img = ndimage.rotate(img, gpa)
# img -> rotated_img
# ----- Background values ----- #
m0, m1, m2 = np.nanmean(img[260:380, 250:370], axis=(0,1))
s0, s1, s2 = np.std(img[260:380, 250:370], axis=(0,1))
# print(m0, m1, m2)
# print(s0, s1, s2)
# ----- Putting pixel values to no-signal pixels ----- #
rimg = copy.deepcopy(rotated_img)
no_sign = (rimg == [0., 0., 0.])
no_sign_idx = np.argwhere(no_sign)
for i in np.arange(no_sign_idx.shape[0]):
rimg[no_sign_idx[i,0], no_sign_idx[i,1], :] = np.random.normal([m0, m1, m2], [s0, s1, s2])
# img -> rotated_img -> rimg
# ----- Reading H alpha data ----- #
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
redshift = ic.redshift
dist_lum = cosmo.luminosity_distance(redshift).value*1.0e+6 # pc
c = 2.99792e+5 # km/s
ang_scale = cosmo.kpc_proper_per_arcmin(redshift).value / 60. # kpc/arcsec
pixel_scale = 0.1 # arcsec/pix
dir_Ha = ic.cpath+'lines3/Halpha/'
Ha_flx = fits.getdata(dir_Ha+'flux_2D.fits', ext=0)
Ha_snr = fits.getdata(dir_Ha+'snr_2D.fits', ext=0)
Ha_sig = fits.getdata(dir_Ha+'sigma_2D.fits', ext=0)
Ha_rchisq = fits.getdata(dir_Ha+'rchisq_2D.fits', ext=0)
Ha_snrpix = fits.getdata(dir_Ha+'snrpix_2D.fits', ext=0)
snr_cnd = ((Ha_snr < 3.0) | (Ha_snrpix < 3.0))
rchisq_cnd = (Ha_rchisq > 50.)
zero_cnd = (snr_cnd | rchisq_cnd)
Ha_flx[zero_cnd] = 0.
sflx = ndimage.gaussian_filter(Ha_flx, sigma=(1.25, 1.25), order=0)
# ----- Making cutout image for 2x2 binning ----- #
hstimg = rimg[int(0.5*(rimg.shape[0]-1)-Ha_flx.shape[0]):int(0.5*(rimg.shape[0]-1)+Ha_flx.shape[0]),
int(0.5*(rimg.shape[1]-1)-Ha_flx.shape[1]):int(0.5*(rimg.shape[1]-1)+Ha_flx.shape[1]), :]
# img -> rotated_img -> rimg -> hstimg
# ----- 2x2 binning for matching the pixel scale ----- #
bin_nx, bin_ny = 2, 2
hstimg_binned = np.zeros((hstimg.shape[0]//bin_ny,
hstimg.shape[1]//bin_nx,
hstimg.shape[2]))
for y in np.arange(int(hstimg.shape[0]/bin_ny)):
for x in np.arange(int(hstimg.shape[1]/bin_nx)):
hstimg_binned[y, x, :] = np.mean(hstimg[bin_ny*y:bin_ny*(y+1), bin_nx*x:bin_nx*(x+1), :], axis=(0,1))
# img -> rotated_img -> rimg -> hstimg -> hstimg_binned
# ----- Making the 2D image ----- #
hstimg_2D = np.sum(hstimg_binned, axis=2)
# ----- Running the 2D correlation ----- #
shifted, error, diffphase = phase_cross_correlation(np.flip(Ha_flx, axis=0), hstimg_2D, upsample_factor=100)
print(shifted)
corr_hstimg = ndimage.shift(hstimg, shift=(2*shifted[0], 2*shifted[1], 0), mode='nearest')
Y_coord = pixel_scale*(np.arange(hstimg_2D.shape[0], step=1)-hstimg_2D.shape[0]/2)
X_coord = pixel_scale*(np.arange(hstimg_2D.shape[1], step=1)-hstimg_2D.shape[1]/2)
# img -> rotated_img -> rimg -> hstimg -> hstimg_binned -> corr_hstimg
# ----- Plotting figure ----- #
def plot_contour(hst_Data, sflux_Data, out, legend_position='lower left',
x0=-2.75, y0=1.25, sign=-1, L=0.6, theta0=gpa*(np.pi/180.0),
xN=-1.90, yN=1.25, xE=-2.95, yE=2.10):
fig, ax = plt.subplots(1, 1, figsize=(8,5))
ax.set_xlim([-3.4, 3.4])
ax.set_ylim([-2.45, 2.45])
ax.set_xticks([-3,-2,-1,0,1,2,3])
ax.set_yticks([-2,-1,0,1,2])
ax.set_xticklabels([r'$-3$',r'$-2$',r'$-1$',0,1,2,3], fontsize=15.0)
ax.set_yticklabels([r'$-2$',r'$-1$',0,1,2], fontsize=15.0)
ax.set_xlabel('arcsec', fontsize=15.0)
ax.set_ylabel('arcsec', fontsize=15.0)
ax.tick_params(width=1.0, length=5.0)
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(1.0)
# HST image
im = ax.imshow(hst_Data, extent=[-3.4,3.4,-2.45,2.45],
origin='lower', aspect='equal')
sig = np.std(sflux_Data[sflux_Data > 0.])
lvs = [0.5*sig, 1.*sig, 2.*sig, 3.*sig, 4.*sig]
lws = tuple(np.repeat(3.75, len(lvs)-1))
cs = tuple(['magenta']*(len(lvs)-1))
# GMOS/IFU contour
    ax.contour(X_coord, Y_coord[::-1], sflux_Data, levels=lvs, linewidths=lws, colors=cs, alpha=0.7)
p0, = ax.plot(-100.0, -100.0, '-', linewidth=lws[0], color=cs[0], alpha=0.7,
label=r"H${\rm \alpha}$ flux contour")
# The orientations
ax.arrow(x0+sign*0.025, y0, L*np.sin(theta0), L*np.cos(theta0), width=0.06,
head_width=0.18, head_length=0.18, fc='yellow', ec='yellow', alpha=0.9)
ax.arrow(x0, y0+sign*0.025, -L*np.cos(theta0), L*np.sin(theta0), width=0.06,
head_width=0.18, head_length=0.18, fc='yellow', ec='yellow', alpha=0.9)
ax.text(xE, yE, 'E', fontsize=17.5, fontweight='bold', color='yellow')
ax.text(xN, yN, 'N', fontsize=17.5, fontweight='bold', color='yellow')
# Scale bar
kpc5 = 5.0 / ang_scale
ax.arrow(2.0, -1.85, kpc5, 0., width=0.07, head_width=0., head_length=0.,
fc='yellow', ec='yellow', alpha=0.9)
ax.text(2.1, -2.2, '5 kpc', fontsize=17.5, fontweight='bold', color='yellow')
# Legend
ax.legend(handles=[p0], fontsize=13.0, loc=legend_position,
handlelength=2.5, frameon=True, borderpad=0.8,
framealpha=0.8, edgecolor='gray')
plt.tight_layout()
plt.savefig(out+'.pdf', dpi=300)
plt.savefig(out+'.png', dpi=300)
plt.close()
plot_contour(corr_hstimg, sflx, dir_fig+"Halpha_contour", legend_position='lower left',
x0=3.00, y0=1.25, sign=-1, L=0.6, theta0=gpa*(np.pi/180.0),
xN=3.00, yN=2.10, xE=1.95, yE=1.30)
# ----- Saving the results ----- #
np.savez('contour_data.npz', img=corr_hstimg, sflx=sflx, x=X_coord, y=Y_coord)
# Printing the running time
print('\n')
print('--- %s seconds ---' %(time.time()-start_time))
```
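The script above registers the rebinned HST cutout against the H-alpha map with `phase_cross_correlation`. A self-contained sketch of that registration step on synthetic data (the Gaussian-smoothed random image and the (4.25, -2.50) pixel shift are illustrative):
```python
import numpy as np
from scipy import ndimage
from skimage.registration import phase_cross_correlation

rng = np.random.default_rng(0)
reference = ndimage.gaussian_filter(rng.random((128, 128)), sigma=3)
moving = ndimage.shift(reference, shift=(4.25, -2.50), mode='nearest')

# Sub-pixel shift of `moving` relative to `reference`; upsample_factor
# sets the sub-pixel precision, as in the script above.
shift, error, diffphase = phase_cross_correlation(reference, moving,
                                                  upsample_factor=100)
print(shift)                                   # ~[-4.25, 2.50]
aligned = ndimage.shift(moving, shift=shift, mode='nearest')
```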
{
"source": "joungh93/SEDfit_prospector",
"score": 2
} |
#### File: joungh93/SEDfit_prospector/build_obj.py
```python
import time, sys, os
import h5py
import numpy as np
import scipy
from matplotlib import pyplot as plt
import fsps
import sedpy
import prospect
import emcee
def build_obs(filternames, mags, ldist, phot_mask,
e_mags=None, snr=10.0, **extras):
"""Build a dictionary of observational data.
:param filternames:
The names of the relevant filters,
in the same order as the photometric data.
(data type: list)
:param mags:
The AB apparent magnitudes of the object from the relevant filters.
(data type: numpy.array)
:param e_mags:
The uncertainties of mags. If None, SNR will be used to determine the uncertainties.
(data type: numpy.array)
:param ldist:
The luminosity distance to assume for translating apparent magnitudes
into absolute magnitudes. [unit: Mpc]
(data type: scalar)
:param phot_mask:
IMPORTANT: the mask is *True* for values that you *want* to fit,
and *False* for values you want to ignore.
(data type: numpy.array boolean)
:returns obs:
A dictionary of observational data to use in the fit.
"""
from prospect.utils.obsutils import fix_obs
import sedpy
# The obs dictionary, empty for now
obs = {}
# Filter objects in the "filters" key of the `obs` dictionary
obs["filters"] = sedpy.observate.load_filters(filternames)
# Now we store the measured fluxes for a single object, **in the same order as "filters"**
# The units of the fluxes need to be maggies (Jy/3631) so we will do the conversion here too.
obs["maggies"] = 10**(-0.4*mags)
# And now we store the uncertainties (again in units of maggies)
    if e_mags is None:
obs["maggies_unc"] = (1./snr) * obs["maggies"]
else:
obs["maggies_unc"] = 0.4*np.log(10.0)*obs["maggies"]*e_mags
# Now we need a mask, which says which flux values to consider in the likelihood.
obs["phot_mask"] = phot_mask
# This is an array of effective wavelengths for each of the filters.
# It is not necessary, but it can be useful for plotting so we store it here as a convenience
obs["phot_wave"] = np.array([f.wave_effective for f in obs["filters"]])
# We do not have a spectrum, so we set some required elements of the obs dictionary to None.
# (this would be a vector of vacuum wavelengths in angstroms)
obs["wavelength"] = None
# (this would be the spectrum in units of maggies)
obs["spectrum"] = None
# (spectral uncertainties are given here)
obs['unc'] = None
# (again, to ignore a particular wavelength set the value of the
    # corresponding element of the mask to *False*)
obs['mask'] = None
# This function ensures all required keys are present in the obs dictionary,
# adding default values if necessary
obs = fix_obs(obs)
return obs
def build_model(object_redshift=None, luminosity_distance=0.0, fixed_metallicity=None,
fixed_dust2=None, add_duste=False, add_neb=False,
f_agn=None,
mass_0=1.0e+8, logzsol_0=-0.5, dust2_0=0.05, tage_0=13., tau_0=1.,
mass_1=1.0e+7, logzsol_1=0.5, dust2_1=0.5, tage_1=5., tau_1=3.,
mass_2=1.0e+6, logzsol_2=0.1, dust2_2=0.1, tage_2=2., tau_2=1.,
**extras):
"""Build a prospect.models.SedModel object
:param object_redshift: (optional, default: None)
If given, produce spectra and observed frame photometry appropriate
for this redshift. Otherwise, the redshift will be zero.
:param luminosity_distance: (optional, default: 0.0)
The luminosity distance (in Mpc) for the model. Spectra and observed
frame (apparent) photometry will be appropriate for this luminosity distance.
:param fixed_metallicity: (optional, default: None)
If given, fix the model metallicity (:math:`log(Z/Z_sun)`) to the given value.
:param fixed_dust2: (optional, default: None)
If given, fix the diffuse dust parameter to the initially given value.
:param add_duste: (optional, default: False)
If `True`, add dust emission and associated (fixed) parameters to the model.
:param add_neb: (optional, default: False)
If `True`, add (fixed) parameters relevant for nebular emission, and
turn nebular emission on.
:returns model:
An instance of prospect.models.SedModel
"""
from prospect.models.sedmodel import SedModel
from prospect.models.templates import TemplateLibrary
from prospect.models import priors
# --- Get a basic delay-tau SFH parameter set. ---
# This has 5 free parameters:
# "mass", "logzsol", "dust2", "tage", "tau"
# And two fixed parameters
# "zred"=0.1, "sfh"=4
# See the python-FSPS documentation for details about most of these
# parameters. Also, look at `TemplateLibrary.describe("parametric_sfh")` to
# view the parameters, their initial values, and the priors in detail.
model_params = TemplateLibrary["parametric_sfh"]
# Add lumdist parameter. If this is not added then the distance is
# controlled by the "zred" parameter and a WMAP9 cosmology.
if luminosity_distance > 0:
model_params["lumdist"] = {"N": 1, "isfree": False,
"init": luminosity_distance, "units":"Mpc"}
# Adjust model initial values (only important for optimization or emcee)
model_params["zred"]["init"] = 0.0
model_params["mass"]["init"] = mass_0
model_params["logzsol"]["init"] = logzsol_0
model_params["dust2"]["init"] = dust2_0
model_params["tage"]["init"] = tage_0
model_params["tau"]["init"] = tau_0
# If we are going to be using emcee, it is useful to provide an
# initial scale for the cloud of walkers (the default is 0.1)
# For dynesty these can be skipped
model_params["mass"]["init_disp"] = mass_1
model_params["mass"]["disp_floor"] = mass_2
model_params["logzsol"]["init_disp"] = logzsol_1
model_params["logzsol"]["disp_floor"] = logzsol_2
model_params["dust2"]["init_disp"] = dust2_1
model_params["dust2"]["disp_floor"] = dust2_2
model_params["tage"]["init_disp"] = tage_1
model_params["tage"]["disp_floor"] = tage_2
model_params["tau"]["init_disp"] = tau_1
model_params["tau"]["disp_floor"] = tau_2
# adjust priors
model_params["mass"]["prior"] = priors.LogUniform(mini=1.0e+6, maxi=1.0e+12)
model_params["logzsol"]["prior"] = priors.TopHat(mini=-2.0, maxi=0.3)
model_params["dust2"]["prior"] = priors.TopHat(mini=0.0, maxi=4.0)
model_params["tage"]["prior"] = priors.TopHat(mini=0.001, maxi=13.8)
model_params["tau"]["prior"] = priors.LogUniform(mini=0.1, maxi=30.0)
# Change the model parameter specifications based on some keyword arguments
if fixed_metallicity is not None:
# make it a fixed parameter
model_params["logzsol"]["isfree"] = False
#And use value supplied by fixed_metallicity keyword
model_params["logzsol"]["init"] = fixed_metallicity
if fixed_dust2 is not None:
# make it a fixed parameter
model_params["dust2"]["isfree"] = False
model_params["dust2"]["init"] = fixed_dust2 # initially given value: 0.6
if object_redshift is not None:
# make sure zred is fixed
model_params["zred"]["isfree"] = False
# And set the value to the object_redshift keyword
model_params["zred"]["init"] = object_redshift
if add_duste:
# Add dust emission (with fixed dust SED parameters)
model_params.update(TemplateLibrary["dust_emission"])
if add_neb:
# Add nebular emission (with fixed parameters)
model_params.update(TemplateLibrary["nebular"])
if f_agn is not None:
# Add AGN component (with fixed parameters)
model_params.update(TemplateLibrary["agn"])
model_params["fagn"]["isfree"] = False
model_params["fagn"]["init"] = f_agn
# Now instantiate the model using this new dictionary of parameter specifications
model = SedModel(model_params)
return model
def build_sps(zcontinuous=1, **extras):
"""
:param zcontinuous:
A value of 1 insures that we use interpolation between SSPs to
have a continuous metallicity parameter (`logzsol`)
See python-FSPS documentation for details
"""
from prospect.sources import CSPSpecBasis
sps = CSPSpecBasis(zcontinuous=zcontinuous)
return sps
def build_noise(**extras):
return None, None
```
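A minimal sketch of wiring the three builders together. The filter names, magnitudes, redshift, and luminosity distance are hypothetical, and the last call assumes prospector's `SedModel.sed(theta, obs=obs, sps=sps)` signature:
```python
import numpy as np
from build_obj import build_obs, build_model, build_sps

filternames = ["sdss_g0", "sdss_r0", "sdss_i0"]   # hypothetical filter set
mags = np.array([21.2, 20.6, 20.3])               # AB apparent magnitudes
phot_mask = np.array([True, True, True])          # fit all three bands

obs = build_obs(filternames, mags, ldist=100.0, phot_mask=phot_mask, snr=10.0)
model = build_model(object_redshift=0.02, luminosity_distance=100.0,
                    add_duste=True, add_neb=True)
sps = build_sps(zcontinuous=1)

# Model photometry (in maggies) at the initial parameter values.
spec, phot, mfrac = model.sed(model.theta, obs=obs, sps=sps)
print(obs["maggies"], phot)
```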
{
"source": "JoungheeKim/emotion_recognition",
"score": 2
} |
#### File: src/dataset/sentiment_dataset.py
```python
from .audio_dataset import AudioProcessor, AudioFeatureConfig
from .language_dataset import LanguageProcessor, LanguageFeatureConfig
from src.configs import DefaultConfig
from src.process.label_preprocess import LabelPreprocessConfig, LabelProcessor
from src.process.split_preprocess import SplitProcessor, SplitPreprocessConfig
import os
import torch
from src.utils import load_data
from torch.utils.data import (
DataLoader, Dataset
)
import logging
import numpy as np
import sys
from typing import Tuple
import pickle
from pathlib import Path
from pydub import AudioSegment, effects
import soundfile as sf
import librosa
def load_with_dump(base_cfg:DefaultConfig, split_name:str):
dump_path = base_cfg.dump_path
if not os.path.isdir(dump_path):
dump_path = os.path.dirname(dump_path)
file_path = os.path.join(dump_path, "{}.pkl".format(split_name))
if os.path.isfile(file_path):
logging.info('load dump file [{}]'.format(file_path))
with open(file_path, 'rb') as f:
return pickle.load(f)
return None
def save_with_dump(data:Tuple, base_cfg:DefaultConfig, split_name:str):
dump_path = base_cfg.dump_path
if not os.path.isdir(dump_path):
dump_path = os.path.dirname(dump_path)
if dump_path is None or dump_path == '':
return
os.makedirs(dump_path, exist_ok=True)
file_path = os.path.join(dump_path, "{}.pkl".format(split_name))
with open(file_path, 'wb') as f:
pickle.dump(data, f)
logging.info('save dump file [{}]'.format(file_path))
class SentimentDataset(Dataset):
def __init__(self, audio_data, language_data, add_language_data, label_data, max_audio_size=None, max_language_size=None, vq_wav2vec=False):
self.audio_data = audio_data
self.language_data = language_data
self.add_language_data = add_language_data
self.label_data = label_data
self.max_audio_size = (
max_audio_size if max_audio_size is not None else sys.maxsize
)
self.max_language_size = (
max_language_size if max_language_size is not None else sys.maxsize
)
self.vq_wav2vec=vq_wav2vec
self.pad_idx=0.0
if vq_wav2vec:
self.pad_idx = 1
self.max_audio_size=2048
temp_folder = 'temp'
os.makedirs(temp_folder, exist_ok=True)
self.temp_path = os.path.join(temp_folder, "{}.wav".format(self.__class__.__name__))
self.del_silence = True
self.default_sample_rate=16000
def __len__(self):
return len(self.label_data)
def _normalize_audio(self, audio_path, temp_path):
audio_extension = Path(audio_path).suffix.replace('.', '')
sound_Obj = AudioSegment.from_file(audio_path, format=audio_extension)
sound_Obj = sound_Obj.set_channels(1)
sound_Obj = sound_Obj.set_frame_rate(self.default_sample_rate)
#sound_Obj = effects.normalize(sound_Obj)
sound_Obj.export(temp_path, format="wav")
def _load_audio(self, audio_path):
temp_path = self.temp_path
audio_extension = Path(audio_path).suffix.replace('.', '')
if audio_extension == 'raw':
wav, curr_sample_rate = sf.read(audio_path, channels=1, samplerate=self.default_sample_rate, format='RAW', subtype='PCM_16')
else:
self._normalize_audio(audio_path, temp_path)
wav, curr_sample_rate = sf.read(temp_path)
if self.del_silence:
non_silence_indices = librosa.effects.split(wav, top_db=30)
wav = np.concatenate([wav[start:end] for start, end in non_silence_indices])
return wav
def __getitem__(self, idx):
audio = self.audio_data[idx]
if type(audio) == str:
audio = self._load_audio(audio)
source = {
'audio' : audio,
'language': self.language_data[idx],
'add_language' : self.add_language_data[idx] if self.add_language_data is not None else None,
'label': self.label_data[idx],
}
return source
@classmethod
def load_with_config(cls, cfg, split_name='train'):
split_names = ['train', 'valid', 'test']
split_ids = {name:idx for idx, name in enumerate(split_names)}
assert split_name in split_names, 'please insert valid split name [{}]'.format(split_names)
base_cfg = cfg.base
ad_cfg = cfg.audio_feature
lm_cfg = cfg.language_feature
label_cfg = cfg.label
split_cfg = cfg.split
tuple_data = load_with_dump(base_cfg, split_name)
if tuple_data is not None:
audio_data, language_data, add_language_data, label_data, vq_wav2vec = tuple_data
return cls(audio_data, language_data, add_language_data, label_data, vq_wav2vec=vq_wav2vec)
## load dataset
df = load_data(base_cfg)
splitProcessor = SplitProcessor(split_cfg)
split_dfs = splitProcessor.get_split_data(df)
df = split_dfs[split_ids[split_name]]
        ## temporary test hook (experimental): subsample the data for quick runs
#print("train")
#df = df[:int(len(df)/100)]
#############################################
## label process
labelProcessor = LabelProcessor(label_cfg)
df = labelProcessor.convert_data(df)
## convert audio data
audioProcessor = AudioProcessor(ad_cfg)
df = audioProcessor.convert_data(df, split_name)
## convert language data
languageProcessor = LanguageProcessor(lm_cfg)
df = languageProcessor.convert_data(df, split_name)
audio_data = audioProcessor.get_data(df)
label_data = labelProcessor.get_data(df)
language_data = languageProcessor.get_data(df, split_name)
add_language_data = languageProcessor.get_add_data(df)
logging.info("loaded dataset [{}]".format(split_name))
vq_wav2vec=audioProcessor.is_vq_wav2vec()
save_with_dump((audio_data, language_data, add_language_data, label_data, vq_wav2vec), base_cfg, split_name)
return cls(audio_data, language_data, add_language_data, label_data, vq_wav2vec=vq_wav2vec)
def collater(self, batch):
## audio
audios = [b['audio'] for b in batch]
channel = audios[0].shape[1] if len(audios[0].shape) >1 else False
sizes = [len(s) for s in audios]
target_size = min(max(sizes), self.max_audio_size)
if channel:
collated_audios = torch.zeros(len(audios), target_size, channel)
padding_mask = (
torch.BoolTensor(collated_audios.shape).fill_(False)
)
for i, (audio, size) in enumerate(zip(audios, sizes)):
audio = torch.tensor(audio, dtype=torch.float)
diff = size - target_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
collated_audios[i] = torch.cat(
[audio, audio.new_full((-diff,channel), 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_audios[i] = self.crop_to_max_size(audio, target_size)
else:
collated_audios = torch.zeros(len(audios), target_size)
padding_mask = (
torch.BoolTensor(collated_audios.shape).fill_(False)
)
for i, (audio, size) in enumerate(zip(audios, sizes)):
audio = torch.tensor(audio, dtype=torch.float)
diff = size - target_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
collated_audios[i] = torch.cat(
[audio, audio.new_full((-diff,), self.pad_idx)]
)
padding_mask[i, diff:] = True
else:
collated_audios[i] = self.crop_to_max_size(audio, target_size)
if self.vq_wav2vec:
collated_audios=collated_audios.long()
## language
tokens = [b['language'] for b in batch]
input_ids = None
token_type_ids = None
attention_mask = None
if type(tokens[0]) != str:
channel = tokens[0].shape[1] if len(tokens[0].shape) > 1 else False
sizes = [len(s) for s in tokens]
target_size = min(max(sizes), self.max_language_size)
#print("channel 머냐?", channel)
if channel:
input_ids = torch.zeros(len(tokens), target_size, channel)
attention_mask = (
torch.BoolTensor(input_ids.shape[:2]).fill_(True)
)
for i, (token, size) in enumerate(zip(tokens, sizes)):
token = torch.tensor(token, dtype=torch.float)
diff = size - target_size
if diff == 0:
input_ids[i] = token
elif diff < 0:
input_ids[i] = torch.cat(
[token, token.new_full((-diff, channel), 0.0)]
)
attention_mask[i, diff:] = False
else:
input_ids[i] = self.crop_to_max_size(token, target_size)
else:
input_ids = torch.zeros(len(tokens), target_size).long()
attention_mask = (
torch.LongTensor(input_ids.shape).fill_(1)
)
token_type_ids = (
torch.LongTensor(input_ids.shape).fill_(0)
)
for i, (token, size) in enumerate(zip(tokens, sizes)):
token = torch.tensor(token, dtype=torch.float)
diff = size - target_size
if diff == 0:
input_ids[i] = token
elif diff < 0:
input_ids[i] = torch.cat(
[token, token.new_full((-diff,), 0)]
)
attention_mask[i, diff:] = 0
else:
input_ids[i] = self.crop_to_max_size(token, target_size)
## add language
add_tokens = [b['add_language'] for b in batch]
add_input_ids = None
add_token_type_ids = None
add_attention_mask = None
if type(add_tokens[0]) != str and add_tokens[0] is not None:
channel = add_tokens[0].shape[1] if len(add_tokens[0].shape) > 1 else False
sizes = [len(s) for s in add_tokens]
target_size = min(max(sizes), self.max_language_size)
if channel:
add_input_ids = torch.zeros(len(add_tokens), target_size, channel)
add_attention_mask = (
torch.BoolTensor(add_input_ids.shape[:2]).fill_(True)
)
for i, (add_token, size) in enumerate(zip(add_tokens, sizes)):
add_token = torch.tensor(add_token, dtype=torch.float)
diff = size - target_size
if diff == 0:
add_input_ids[i] = add_token
elif diff < 0:
add_input_ids[i] = torch.cat(
[add_token, add_token.new_full((-diff, channel), 0.0)]
)
add_attention_mask[i, diff:] = False
else:
add_input_ids[i] = self.crop_to_max_size(add_token, target_size)
else:
add_input_ids = torch.zeros(len(add_tokens), target_size).long()
add_attention_mask = (
torch.LongTensor(add_input_ids.shape).fill_(1)
)
add_token_type_ids = (
torch.LongTensor(add_input_ids.shape).fill_(0)
)
for i, (add_token, size) in enumerate(zip(add_tokens, sizes)):
add_token = torch.tensor(add_token, dtype=torch.float)
diff = size - target_size
if diff == 0:
add_input_ids[i] = add_token
elif diff < 0:
add_input_ids[i] = torch.cat(
[add_token, add_token.new_full((-diff,), 0)]
)
add_attention_mask[i, diff:] = 0
else:
add_input_ids[i] = self.crop_to_max_size(add_token, target_size)
labels = [b['label'] for b in batch]
## for BCE & Cross Entropy
labels_dtype = torch.float if type(labels[0]) == list or type(labels[0])==np.ndarray else torch.long
return {
            ## audio fields
'audio': collated_audios, 'padding_mask': padding_mask,
            ## language fields
'input_ids' : input_ids,
'token_type_ids' : token_type_ids,
'attention_mask' : attention_mask,
            ## additional language fields
'add_input_ids': add_input_ids,
'add_token_type_ids': add_token_type_ids,
'add_attention_mask': add_attention_mask,
            ## label fields
'label': torch.tensor(labels, dtype=labels_dtype),
}
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end]
def load_dataloader(dataset:SentimentDataset, shuffle:bool=True, batch_size:int=2):
    ## temporary tweak
return DataLoader(
dataset, shuffle=shuffle, batch_size=batch_size, collate_fn=dataset.collater
)
```
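The `collater` above pads every audio clip to the longest one in the batch and flags the padded samples in a boolean mask. A stripped-down sketch of that padding logic for mono (1-D) clips:
```python
import torch

def pad_audio_batch(audios, pad_value=0.0):
    # audios: list of 1-D float tensors with different lengths
    target = max(a.numel() for a in audios)
    batch = torch.full((len(audios), target), pad_value)
    padding_mask = torch.zeros(len(audios), target, dtype=torch.bool)
    for i, a in enumerate(audios):
        batch[i, :a.numel()] = a
        padding_mask[i, a.numel():] = True  # True marks padded samples
    return batch, padding_mask

batch, mask = pad_audio_batch([torch.randn(16000), torch.randn(12000)])
print(batch.shape, mask.sum(dim=1))  # torch.Size([2, 16000]) tensor([0, 4000])
```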
#### File: src/dataset/word_process.py
```python
from konlpy.tag import Mecab
from sklearn.preprocessing import normalize
from soynlp.hangle import compose, decompose, character_is_korean
import numpy as np
class Word2Vec:
def __init__(self, vecs_txt_fname, vecs_bin_fname=None, method="word2vec", tokenizer_name="mecab"):
self.tokenizer = get_tokenizer(tokenizer_name)
self.tokenizer_name = tokenizer_name
self.method = method
self.dictionary, self.words, self.vecs = self.load_vectors(vecs_txt_fname, method)
self.dim = self.vecs.shape[1]
if "fasttext" in method:
from fasttext import load_model as load_ft_model
self.model = load_ft_model(vecs_bin_fname)
def get_word_vector(self, word):
if self.method == "fasttext-jamo":
word = jamo_sentence(word)
if self._is_in_vocabulary(word):
vector = self.dictionary[word]
else:
if "fasttext" in self.method:
vector = self.model.get_word_vector(word)
else:
vector = np.zeros(self.dim)
print("설마 여기인가?")
return vector
def load_vectors(self, vecs_fname, method):
if method == "word2vec":
from gensim.models import Word2Vec
model = Word2Vec.load(vecs_fname)
## gensim fuction 'index2word' is replaced to 'index_to_key'
## words = model.wv.index2word
words = model.wv.index_to_key
vecs = model.wv.vectors
else:
words, vecs = [], []
with open(vecs_fname, 'r', encoding='utf-8') as f:
if "fasttext" in method:
next(f) # skip head line
for line in f:
if method == "swivel":
splited_line = line.strip().split("\t")
else:
splited_line = line.strip().split(" ")
words.append(splited_line[0])
vec = [float(el) for el in splited_line[1:]]
vecs.append(vec)
unit_vecs = normalize(vecs, norm='l2', axis=1)
dictionary = {}
for word, vec in zip(words, unit_vecs):
dictionary[word] = vec
return dictionary, words, unit_vecs
def tokenize(self, sentence: str):
sentence = sentence.strip()
if self.tokenizer_name == "khaiii":
tokens = []
for word in self.tokenizer.analyze(sentence):
tokens.extend([str(m).split("/")[0] for m in word.morphs])
else:
tokens = self.tokenizer.morphs(sentence)
return tokens
def encode(self, data):
assert type(data) in [str, list], 'not supported data-type [{}]'.format(type(data))
output = None
if type(data) == str:
sentence = data
tokens = self.tokenize(sentence)
encoded_tokens = [self.get_word_vector(token) for token in tokens]
output = np.array(encoded_tokens)
if type(data) == list:
output = list()
for sentence in data:
tokens = self.tokenize(sentence)
encoded_tokens = [self.get_word_vector(token) for token in tokens]
encoded_tokens = np.array(encoded_tokens)
output.append(encoded_tokens)
return output
def post_processing(self, tokens):
results = []
for token in tokens:
            # put spaces around digits so they split into separate tokens
processed_token = [el for el in re.sub(r"(\d)", r" \1 ", token).split(" ") if len(el) > 0]
results.extend(processed_token)
return results
def jamo_to_word(self, jamo):
jamo_list, idx = [], 0
while idx < len(jamo):
if not character_is_korean(jamo[idx]):
jamo_list.append(jamo[idx])
idx += 1
else:
jamo_list.append(jamo[idx:idx + 3])
idx += 3
word = ""
for jamo_char in jamo_list:
if len(jamo_char) == 1:
word += jamo_char
elif jamo_char[2] == "-":
word += compose(jamo_char[0], jamo_char[1], " ")
else:
word += compose(jamo_char[0], jamo_char[1], jamo_char[2])
return word
def _is_in_vocabulary(self, word):
if self.method == "fasttext-jamo":
word = jamo_sentence(word)
return word in self.dictionary.keys()
def get_tokenizer(tokenizer_name):
from konlpy.tag import Okt, Komoran, Mecab, Hannanum, Kkma
if tokenizer_name == "komoran":
tokenizer = Komoran()
elif tokenizer_name == "okt":
tokenizer = Okt()
elif tokenizer_name == "mecab":
tokenizer = Mecab()
elif tokenizer_name == "hannanum":
tokenizer = Hannanum()
elif tokenizer_name == "kkma":
tokenizer = Kkma()
elif tokenizer_name == "khaiii":
from khaiii import KhaiiiApi
tokenizer = KhaiiiApi()
else:
tokenizer = Mecab()
return tokenizer
import re
doublespace_pattern = re.compile('\s+')
def jamo_sentence(sent):
def transform(char):
if char == ' ':
return char
cjj = decompose(char)
if len(cjj) == 1:
return cjj
cjj_ = ''.join(c if c != ' ' else '-' for c in cjj)
return cjj_
sent_ = []
for char in sent:
if character_is_korean(char):
sent_.append(transform(char))
else:
sent_.append(char)
sent_ = doublespace_pattern.sub(' ', ''.join(sent_))
return sent_
```
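`jamo_sentence` delegates syllable splitting to soynlp's `decompose`, but the underlying Hangul arithmetic is compact enough to sketch directly: every precomposed syllable in U+AC00..U+D7A3 encodes its (initial, medial, final) indices as `0xAC00 + (cho*21 + jung)*28 + jong`:
```python
CHOSUNG = list("ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎ")          # 19 initials
JUNGSUNG = list("ㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣ")    # 21 medials
JONGSUNG = [" "] + list("ㄱㄲㄳㄴㄵㄶㄷㄹㄺㄻㄼㄽㄾㄿㅀㅁㅂㅄㅅㅆㅇㅈㅊㅋㅌㅍㅎ")  # 28 finals, first = none

def decompose_syllable(ch):
    # Valid only for precomposed Hangul syllables (U+AC00..U+D7A3).
    code = ord(ch) - 0xAC00
    cho, rest = divmod(code, 21 * 28)
    jung, jong = divmod(rest, 28)
    return CHOSUNG[cho], JUNGSUNG[jung], JONGSUNG[jong]

print(decompose_syllable("한"))  # ('ㅎ', 'ㅏ', 'ㄴ')
print(decompose_syllable("소"))  # ('ㅅ', 'ㅗ', ' ') (empty final; jamo_sentence writes it as '-')
```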
#### File: src/model/audio_transformer.py
```python
import torch
from torch import nn
import os
import sys
from torch.nn import functional as F
from collections import OrderedDict
from dataclasses import _MISSING_TYPE, dataclass, field
from .audio_model import AudioModel, AudioModelConfig
from src.dataset.audio_dataset import AudioFeatureConfig
from src.process.label_preprocess import LabelPreprocessConfig
@dataclass
class AudioTransformerConfig(AudioModelConfig):
name:str = 'audio_transformer'
num_layers: int = 4
hidden_dim: int = 20
head_num: int = 4
final_dropout:float=0.1
last_hidden: bool = False
max_pool: bool = False
kernel_size: int = 3
proj_layer:str=''
class AudioTransformerModel(AudioModel):
def __init__(self, model_cfg:AudioTransformerConfig, feature_cfg:AudioFeatureConfig, label_cfg:LabelPreprocessConfig):
super(AudioTransformerModel, self).__init__(model_cfg, feature_cfg, label_cfg)
def calculate_padding(kernel_size, dilation=1):
padding = (dilation * (kernel_size - 1)) / 2
return int(padding)
hidden_dim = self.encoder_dim
self.proj_layer = model_cfg.proj_layer
if self.proj_layer == 'conv':
            assert model_cfg.kernel_size % 2 == 1, "kernel_size must be odd, got {}".format(model_cfg.kernel_size)
            padding = calculate_padding(model_cfg.kernel_size)
            self.audio_proj = nn.Conv1d(self.encoder_dim, model_cfg.hidden_dim,
                                        kernel_size=model_cfg.kernel_size,
                                        padding=padding, bias=False)
hidden_dim = model_cfg.hidden_dim
elif self.proj_layer == 'linear':
            self.audio_proj = nn.Linear(self.encoder_dim, model_cfg.hidden_dim)
hidden_dim = model_cfg.hidden_dim
self.layers = nn.ModuleList([])
#print("hidden_dim", hidden_dim)
#print("encoder_dim", self.encoder_dim)
#self.encoder_norm = LayerNorm(self.encoder_dim)
for layer in range(model_cfg.num_layers):
new_layer = nn.TransformerEncoderLayer(d_model=hidden_dim, nhead=model_cfg.head_num)
self.layers.append(new_layer)
#self.encoder = nn.Conv1d(768, args.hidden_dim, kernel_size=3, padding=1, bias=False) # 5
self.final_dropout = nn.Dropout(model_cfg.final_dropout)
self.proj = nn.Linear(hidden_dim, self.num_class)
self.last_hidden=model_cfg.last_hidden
self.max_pool = model_cfg.max_pool
def forward(self, audio, padding_mask=None, **kwargs):
## Audio
outputs, padding_mask = self.w2v_encoding(audio, padding_mask)
        if hasattr(self, 'audio_proj'):
            if type(self.audio_proj) == nn.Conv1d:
                # for conv, (B, L, D) => (B, D, L)
                outputs = outputs.transpose(1, 2)
                # (B, D, L) => (B, L, D)
                outputs = self.audio_proj(outputs).transpose(1, 2)
            else:
                outputs = self.audio_proj(outputs)
## Calculate Padding
        if padding_mask is not None and len(padding_mask.shape) > 2:
            padding_mask = padding_mask[:, :, 0]
audio_pad_true_mask = padding_mask.bool() if padding_mask is not None else None
audio_pad_false_mask = (padding_mask == 0) if padding_mask is not None else None
        # (B, L, D) => (L, B, D) for the transformer layers
outputs = outputs.transpose(0, 1)
for layer in self.layers:
outputs = layer(outputs, src_key_padding_mask=audio_pad_true_mask)
        # (L, B, D) => (B, L, D)
outputs = outputs.transpose(0, 1)
#print('audio_pad_false_mask', audio_pad_false_mask)
#print('audio_pad_true_mask', audio_pad_true_mask)
if not self.last_hidden:
outputs = self._masked_mean(outputs, audio_pad_false_mask, dim=1)
else:
# (bat, dim)
outputs = outputs[:,0,:]
#print("outputs1", outputs.shape)
outputs = self.final_dropout(outputs)
outputs = self.proj(outputs)
        return {
            'output': outputs,
            'padding_mask': padding_mask,
        }
def get_logits(self, net_output):
logits = net_output["output"]
padding = net_output["padding_mask"]
if padding is not None and padding.any():
padding = padding.T
logits[padding][..., 0] = 0
logits[padding][..., 1:] = float('-inf')
return logits
```
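The optional `conv` projection above treats the feature dimension as Conv1d channels, which is why the tensor is transposed around the call. A small sketch of that `(B, L, D) -> (B, L, H)` projection with illustrative sizes:
```python
import torch
from torch import nn

B, L, D, H, k = 2, 100, 768, 20, 3
proj = nn.Conv1d(D, H, kernel_size=k, padding=(k - 1) // 2, bias=False)

x = torch.randn(B, L, D)
y = proj(x.transpose(1, 2)).transpose(1, 2)  # conv over time, back to (B, L, H)
print(y.shape)                               # torch.Size([2, 100, 20])
```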
#### File: src/model/conformer.py
```python
import torch
from torch import nn
import os
import sys
from torch.nn import functional as F
from collections import OrderedDict
from conformer import ConformerBlock
from dataclasses import _MISSING_TYPE, dataclass, field
from .audio_model import AudioModelConfig, AudioModel
from src.dataset.audio_dataset import AudioFeatureConfig
from src.process.label_preprocess import LabelPreprocessConfig
@dataclass
class ConformerConfig(AudioModelConfig):
name:str = 'conformer'
kernel_size: int = 32
hidden_dim: int = 80
head_dim: int = 10
head_num: int = 4
ff_mult: int = 4
num_layers: int = 4
attn_dropout: float = 0.1
ff_dropout: float = 0.1
conv_dropout: float = 0.1
final_dropout: float = 0.1
last_hidden:bool=False
max_pool: bool = False
class Conformer(AudioModel):
def __init__(self, model_cfg:ConformerConfig, feature_cfg:AudioFeatureConfig, label_cfg:LabelPreprocessConfig):
super(Conformer, self).__init__(model_cfg, feature_cfg, label_cfg)
def calculate_padding(kernel_size, dilation=1):
padding = (dilation * (kernel_size - 1)) / 2
return int(padding)
        assert model_cfg.kernel_size % 2 == 1, "kernel_size must be odd, got {}".format(model_cfg.kernel_size)
padding = calculate_padding(model_cfg.kernel_size)
self.encoder = nn.Conv1d(self.encoder_dim, model_cfg.hidden_dim, kernel_size=model_cfg.kernel_size, padding=padding, bias=False)
self.layers = nn.ModuleList([])
for layer in range(model_cfg.num_layers):
new_layer = ConformerBlock(
dim=model_cfg.hidden_dim,
dim_head=model_cfg.head_dim,
heads=model_cfg.head_num,
ff_mult=model_cfg.ff_mult,
conv_expansion_factor=2,
conv_kernel_size=model_cfg.kernel_size,
attn_dropout=model_cfg.attn_dropout,
ff_dropout=model_cfg.ff_dropout,
conv_dropout=model_cfg.conv_dropout,
)
self.layers.append(new_layer)
#self.encoder = nn.Conv1d(768, args.hidden_dim, kernel_size=3, padding=1, bias=False) # 5
self.final_dropout = nn.Dropout(model_cfg.final_dropout)
self.proj = nn.Linear(model_cfg.hidden_dim, self.num_class)
self.last_hidden=model_cfg.last_hidden
self.max_pool = model_cfg.max_pool
def forward(self, audio, padding_mask=None, **kwargs):
self.w2v_encoder.eval()
audio, padding_mask = self.w2v_encoding(audio, padding_mask)
# for conv, (B, L, D) => (B, D, L)
x = audio.transpose(1, 2)
# (B, d, L) => (B, L, d)
x = self.encoder(x).transpose(1, 2)
audio_pad_true_mask = padding_mask.bool() if padding_mask is not None else None
audio_pad_false_mask = (padding_mask==0).bool() if padding_mask is not None else None
for layer in self.layers:
x = layer(x)
if not self.last_hidden:
def masked_mean(tensor_x, mask=None, dim=0):
if mask is None:
return tensor_x.mean(dim=dim)
mask = mask.unsqueeze(2)
masked = tensor_x * mask # Apply the mask using an element-wise multiply
return masked.sum(dim=dim) / mask.sum(dim=dim) # Find the average!
output = masked_mean(x, audio_pad_false_mask, dim=1)
else:
# (bat, dim)
output = x[:,-1,:]
output = self.final_dropout(output)
output = self.proj(output)
return {
'output': output,
'padding_mask': padding_mask,
}
def get_logits(self, net_output):
logits = net_output["output"]
padding = net_output["padding_mask"]
if padding is not None and padding.any():
padding = padding.T
logits[padding][..., 0] = 0
logits[padding][..., 1:] = float('-inf')
return logits
```
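Both this model and the transformer head pool variable-length sequences with a masked mean, so padded frames do not dilute the average. A self-contained sketch of that pooling (shapes follow the `(B, L, D)` convention used above; `keep_mask` is True on real frames):
```python
import torch

def masked_mean(x, keep_mask, dim=1):
    # x: (B, L, D); keep_mask: (B, L), True on non-padded frames.
    mask = keep_mask.unsqueeze(-1).to(x.dtype)            # (B, L, 1)
    return (x * mask).sum(dim=dim) / mask.sum(dim=dim).clamp(min=1e-8)

x = torch.randn(2, 5, 4)
keep = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 1]], dtype=torch.bool)
pooled = masked_mean(x, keep)  # (2, 4); row 0 averages only its 3 real frames
print(pooled.shape)
```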
#### File: emotion_recognition/src/model.py
```python
import torch
from torch import nn
from torch.nn import functional as F
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from transformers import BertTokenizerFast, BertModel
from torch.nn import MultiheadAttention, Linear, Dropout, LayerNorm
from torch import Tensor
from typing import Optional, Any
class SentimentTransformer(nn.Module):
def __init__(self, args):
super(SentimentTransformer, self).__init__()
audio_processor = Wav2Vec2Processor.from_pretrained(args.audio_model)
audio_model = Wav2Vec2ForCTC.from_pretrained(args.audio_model)
language_tokenizer = BertTokenizerFast.from_pretrained(args.language_model)
language_model = BertModel.from_pretrained(args.language_model)
for idx in range(args.num_layers):
print()
## https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
class TransformerEncoderLayer(nn.Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.activation = F.relu
def forward(self, query: Tensor, key: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src2 = self.self_attn(query, key, key, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = query + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
```
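Unlike the stock PyTorch encoder layer, this variant takes separate `query` and `key` tensors, so one modality can attend over another. A minimal usage sketch with hypothetical shapes, in the `(L, B, D)` layout that `MultiheadAttention` expects by default:
```python
import torch

layer = TransformerEncoderLayer(d_model=512, nhead=8)
query = torch.rand(10, 32, 512)  # e.g., 10 text positions
key = torch.rand(40, 32, 512)    # e.g., 40 audio frames
out = layer(query, key)          # (10, 32, 512): queries updated from keys
print(out.shape)
```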
#### File: src/process/audio_preprocess.py
```python
import os
from tqdm import tqdm
import torchaudio
import torch
from .base_preprocess import BaseProcessor, BasicProcessConfig
from pathlib import Path
import librosa
from pydub import AudioSegment, effects
import soundfile as sf
from dataclasses import _MISSING_TYPE, dataclass, field
import numpy as np
@dataclass
class AudioPreprocess(BasicProcessConfig):
    name: str = 'audio_preprocess'
column_name: str = 'audio'
audio_dir:str = '/code/gitRepo/sentiment_speech/data/sentiment_4/audio'
ext:str='wav'
default_sample_rate:int = 16000
del_silence:bool = True
class AudioProcessor(BaseProcessor):
def __init__(self, cfg:AudioPreprocess):
super(AudioProcessor, self).__init__(cfg)
self.default_sample_rate = cfg.default_sample_rate
self.del_silence = cfg.del_silence
self.ext = cfg.ext
self.audio_dir=cfg.audio_dir
self.column_name=cfg.column_name
temp_folder = 'temp'
os.makedirs(temp_folder, exist_ok=True)
temp_path = os.path.join(temp_folder, "{}.wav".format(self.__class__.__name__))
self.temp_path = temp_path
def _normalize_audio(self, audio_path, temp_path):
audio_extension = Path(audio_path).suffix.replace('.', '')
sound_Obj = AudioSegment.from_file(audio_path, format=audio_extension)
sound_Obj = sound_Obj.set_frame_rate(self.default_sample_rate)
sound_Obj = effects.normalize(sound_Obj)
sound_Obj.export(temp_path, format="wav")
def _id_to_wav(self, audio_path, temp_path):
if not os.path.isfile(audio_path):
return None
## normalize Audio
self._normalize_audio(audio_path, temp_path)
wav, curr_sample_rate = sf.read(temp_path)
if self.del_silence:
non_silence_indices = librosa.effects.split(wav, top_db=30)
wav = np.concatenate([wav[start:end] for start, end in non_silence_indices])
return wav
def convert(self, data):
        vector = self.audio2vec(torch.tensor(data, device=self.device))
return vector.cpu().T.numpy()
def convert_data(self, script_df):
## for tqdm
tqdm.pandas()
script_df['audio'] = script_df.progress_apply(
lambda x: self.convert(x['audio']), axis=1)
print('done')
return script_df
```
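`_normalize_audio` resamples to 16 kHz and peak-normalizes with pydub before the waveform is read back with soundfile. A minimal sketch of that preprocessing on a single file (the paths are hypothetical):
```python
import soundfile as sf
from pydub import AudioSegment, effects

def normalize_to_16k(src_path, dst_path, sample_rate=16000):
    sound = AudioSegment.from_file(src_path)  # format inferred from extension
    sound = sound.set_channels(1)             # mono, as in SentimentDataset
    sound = sound.set_frame_rate(sample_rate)
    sound = effects.normalize(sound)          # peak normalization
    sound.export(dst_path, format="wav")

normalize_to_16k("input.mp3", "temp.wav")     # hypothetical paths
wav, sr = sf.read("temp.wav")
print(sr, wav.shape)
```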
#### File: src/speech_to_text/kakao_api.py
```python
import os
from pathlib import Path
import requests
import json
from .stt_api import RestAPI, APIConfig
from dataclasses import _MISSING_TYPE, dataclass, field
import logging
@dataclass
class KaKaoConfig(APIConfig):
name:str = 'kakao'
kakao_key:str = '0dfec036628c8c40d61ead0bd108c469'
class KakaoAPI(RestAPI):
def __init__(self, cfg:KaKaoConfig):
super(KakaoAPI, self).__init__(cfg)
self.kakao_key = cfg.kakao_key
return
def _recognize(self, audio_path, temp_path):
if not os.path.isfile(audio_path):
print("NO audio file [{}]".format(audio_path))
return None
try:
# normalize Audio
self._normalize_audio(audio_path, temp_path)
# Loads the audio into memory
kakao_speech_url = "https://kakaoi-newtone-openapi.kakao.com/v1/recognize"
headers = {
"Content-Type": "application/octet-stream",
"X-DSS-Service": "DICTATION",
"Authorization": "KakaoAK " + self.kakao_key,
}
with open(temp_path, 'rb') as fp:
audio = fp.read()
res = requests.post(kakao_speech_url, headers=headers, data=audio)
result_json_string = res.text[res.text.index('{"type":"finalResult"'):res.text.rindex('}') + 1]
result = json.loads(result_json_string)
return result['value']
except Exception as e:
logging.info("Problem in audio file [{}]".format(audio_path))
logging.info('[Failed message]' + str(e))
return None
```
#### File: src/speech_to_text/stt_api.py
```python
import os
from pathlib import Path
from tqdm import tqdm
from pydub import AudioSegment, effects
import soundfile as sf
import logging
from src.configs import BasicConfig
from dataclasses import _MISSING_TYPE, dataclass, field
@dataclass
class APIConfig(BasicConfig):
name:str = 'api'
column_name:str='stt'
audio_dir:str='data/voice_sentiment_4/audio'
ext:str='wav'
modified_flag:bool=True
class RestAPI:
default_sample_rate=16000
modified_ids = {
'5e2979c25807b852d9e018d5': '5e37def1dbc4b7182a6a9e44',
'5e298bc45807b852d9e01a10': '5e38a1d805fef317e874c5f7',
'5e2998b85807b852d9e01b02': '5e37da3e33e9ad176cc9b2b1',
'5e33638b5807b852d9e04aeb': '5e37e25905fef317e874c12e',
'5e32924e5807b852d9e03894': '5e38d5c133e9ad176cc9b8e5',
'5e2ad4145807b852d9e020d9': '5e3937ab05fef317e874c913',
'5e31622f5807b852d9e032ba': '5e393ea333e9ad176cc9bae6',
'5e2ad43e5807b852d9e020dc': '5e39383fdbc4b7182a6aa5f4',
'5e298bdc5807b852d9e01a11': '5e38a2037995ef170fc0f96c',
'5e298c085807b852d9e01a12': '5e38a219c8c25f16cd145d25',
'5e3292825807b852d9e0389a': '5e38d655dbc4b7182a6aa405',
'5e298b9f5807b852d9e01a0f': '5e38a184c8c25f16cd145d22',
'5e315dca5807b852d9e03275': '5e380ef305fef317e874c3b5',
'5e3292655807b852d9e03896': '5e38d5fe05fef317e874c705',
'5e33a9d35807b852d9e050f4': '5e37e2f005fef317e874c13e',
'5e3161c65807b852d9e032af': '5e393e55ee8206179943d383',
}
def __init__(self, cfg:APIConfig):
self.name = cfg.name
self.column_name = cfg.column_name
self.audio_dir = cfg.audio_dir
self.ext = cfg.ext
self.modified_flag = cfg.modified_flag
temp_folder = 'temp'
os.makedirs(temp_folder, exist_ok=True)
temp_path = os.path.join(temp_folder, "{}.wav".format(self.__class__.__name__))
self.temp_path = temp_path
def run_stt(self, script_df):
logging.info('start to call Speech to Text API')
## for tqdm
tqdm.pandas()
if self.modified_flag:
for wav_id, modified_id in self.modified_ids.items():
mask = script_df['wav_id'] == wav_id
script_df.loc[mask, 'wav_id'] = modified_id
if 'stt' not in script_df.columns:
script_df['stt'] = None
## find None rows
null_rows = script_df['stt'].isnull()
## apply to stt
if null_rows.sum() > 0:
script_df.loc[null_rows, 'stt'] = script_df.loc[null_rows].progress_apply(
lambda x: self._recognize(os.path.join(self.audio_dir, "{}.{}".format(x['wav_id'], self.ext)), self.temp_path), axis=1)
logging.info('End Speech to Text API')
return script_df
def _normalize_audio(self, audio_path, temp_path):
audio_extension = Path(audio_path).suffix.replace('.', '')
sound_Obj = AudioSegment.from_file(audio_path, format=audio_extension)
sound_Obj = sound_Obj.set_frame_rate(self.default_sample_rate)
sound_Obj = effects.normalize(sound_Obj)
sound_Obj.export(temp_path, format="wav")
def _id_to_wav(self, audio_path, temp_path):
if not os.path.isfile(audio_path):
return None
## normalize Audio
self._normalize_audio(audio_path, temp_path)
wav, curr_sample_rate = sf.read(temp_path)
return wav
def _recognize(self, audio_path:str, temp_path:str):
raise NotImplementedError
## normalize audio
# script_df['audio'] = None
# null_rows = script_df['audio'].isnull()
# if null_rows.sum() > 0:
# script_df.loc[null_rows, 'audio'] = script_df.loc[null_rows].progress_apply(
# lambda x: self._id_to_wav(os.path.join(self.audio_dir, "{}.{}".format(x['wav_id'], self.ext)),
# self.temp_path),
# axis=1)
```
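A sketch of how `run_stt` above is meant to be driven; the DataFrame and IDs are illustrative, and any concrete `RestAPI` subclass implementing `_recognize` (such as `KakaoAPI` from the previous file) would do.
```python
import pandas as pd

# Illustrative frame; each wav_id must match an <audio_dir>/<wav_id>.wav file.
script_df = pd.DataFrame({"wav_id": ["example_id_001", "example_id_002"]})
api = KakaoAPI(KaKaoConfig())  # any concrete RestAPI subclass
script_df = api.run_stt(script_df)
print(script_df[["wav_id", "stt"]])
```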
#### File: emotion_recognition/src/train.py
```python
import hydra
import os
from src.utils import reset_logging, init, load_data, save_data, ResultWriter
import logging
from src.dataset.sentiment_dataset import SentimentDataset, load_dataloader
from src.model import get_model_class
from src.configs import DefaultConfig
import torch
import numpy as np
from torch.utils.data import (
DataLoader
)
from transformers import (
AdamW,
BertForSequenceClassification,
BertConfig,
BertTokenizer,
get_linear_schedule_with_warmup,
)
import random
from tqdm import tqdm, trange
from collections import OrderedDict
LOSS_MAPPING = OrderedDict(
[
# Add configs here
("bce", torch.nn.BCELoss()),
('cross', torch.nn.NLLLoss()),
]
)
from sklearn.metrics import accuracy_score, f1_score
from pathlib import Path
@hydra.main(config_path=os.path.join("..", "configs"), config_name="train")
def main(cfg):
## Reset logging
reset_logging()
args = cfg.base
## GPU setting
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
set_seed(args.seed)
## load_dataset
train_dataset = SentimentDataset.load_with_config(cfg, 'train')
valid_dataset = SentimentDataset.load_with_config(cfg, 'valid')
eval_dataset = SentimentDataset.load_with_config(cfg, 'test')
## load model
model = get_model_class(cfg.model).load_with_config(cfg)
model.to(args.device)
writer = ResultWriter(args.experiments_path)
results = {}
## training
train_results = train(args, cfg.label, train_dataset, valid_dataset, model)
results.update(**train_results)
args.pretrained_model = args.save_path
loss_pretrained_model_path = os.path.join(args.pretrained_model, 'loss')
model = get_model_class(cfg.model).from_pretrained(loss_pretrained_model_path)
model.to(args.device)
test_results = evaluate(args, cfg.label, valid_dataset, model)
test_results.update(
{
'valid_check': 'loss',
'split': 'valid',
}
)
results.update(test_results)
writer.update(cfg.base, cfg.model, cfg.label, cfg.audio_feature, cfg.language_feature, **results)
## test
results = {}
test_results = evaluate(args, cfg.label, eval_dataset, model)
test_results.update(
{
'valid_check': 'loss',
'split': 'test',
}
)
results.update(test_results)
writer.update(cfg.base, cfg.model, cfg.label, cfg.audio_feature, cfg.language_feature, **results)
loss_pretrained_model_path = os.path.join(args.pretrained_model, 'accuracy')
model = get_model_class(cfg.model).from_pretrained(loss_pretrained_model_path)
model.to(args.device)
results = {}
test_results = evaluate(args, cfg.label, eval_dataset, model)
test_results.update(
{
'valid_check': 'accuracy',
'split': 'test',
}
)
results.update(test_results)
writer.update(cfg.base, cfg.model, cfg.label, cfg.audio_feature, cfg.language_feature, **results)
def train(args:DefaultConfig, label_cfg, train_dataset, valid_dataset, model):
logging.info("start training")
## load dataloader
train_dataloader = load_dataloader(
train_dataset, shuffle=True, batch_size=args.train_batch_size
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
)
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
wav2vec_parameter_name = 'w2v_encoder'
bert_parameter_name = 'bert_encoder'
optimizer_grouped_parameters = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) and wav2vec_parameter_name in n
],
"weight_decay": args.weight_decay,
'lr' : args.wav2vec_learning_rate if args.wav2vec_learning_rate > 0 else args.learning_rate,
},
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) and bert_parameter_name in n
],
"weight_decay": args.weight_decay,
'lr' : args.bert_learning_rate if args.bert_learning_rate > 0 else args.learning_rate,
},
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) and wav2vec_parameter_name not in n and bert_parameter_name not in n
],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
args.warmup_steps = int(args.warmup_percent * t_total)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
logging.info(" Num Epochs = %d", args.num_train_epochs)
logging.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logging.info(" Total optimization steps = %d", t_total)
logging.info(" Train Batch size = %d", args.train_batch_size)
logging.info(" Train Data size = %d", len(train_dataset))
step = 0
global_step = 0
best_loss = 1e10
best_loss_step = 0
best_f1 = 0
best_f1_step = 0
stop_iter = False
train_iterator = trange(0, int(args.num_train_epochs), desc="Epoch")
model.zero_grad()
model.set_step(global_step)
for _ in train_iterator:
for batch in train_dataloader:
step += 1
model.train()
batch = {key:item.to(args.device) for key, item in batch.items() if type(item)==torch.Tensor}
net_output = model(**batch)
final_loss = model.get_loss(net_output, batch['label'])
if args.gradient_accumulation_steps > 1:
final_loss = final_loss / args.gradient_accumulation_steps
if args.n_gpu > 1:
final_loss = final_loss.mean()
if args.fp16:
with amp.scale_loss(final_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
final_loss.backward()
train_iterator.set_postfix_str(s="loss = {:.8f}".format(float(final_loss)), refresh=True)
if step % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
## set global step
model.set_step(global_step)
if (args.logging_steps > 0 and global_step % args.logging_steps == 0 and global_step > 0):
# if (args.logging_steps > 0 and global_step % args.logging_steps == 0):
results = evaluate(args, label_cfg, valid_dataset, model, global_step)
eval_loss = results['loss']
eval_f1 = results['f1_score']
eval_acc = results['accuracy']
if eval_loss < best_loss:
best_loss = eval_loss
#best_f1 = eval_f1
best_loss_step = global_step
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(os.path.join(args.save_path, 'loss'))
#torch.save(args, os.path.join(args.save_path, 'loss', "training_args.bin"))
if eval_f1 > best_f1:
best_f1 = eval_f1
best_f1_step = global_step
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(os.path.join(args.save_path, 'accuracy'))
#torch.save(args, os.path.join(args.save_path, 'accuracy', "training_args.bin"))
logging.info("***** best_acc : %.4f *****", eval_acc)
logging.info("***** best_f1 : %.4f *****", best_f1)
logging.info("***** best_loss : %.4f *****", best_loss)
if args.max_steps > 0 and global_step > args.max_steps:
stop_iter = True
break
if stop_iter:
break
return {'best_valid_loss': best_loss,
'best_valid_loss_step': best_loss_step,
'best_valid_f1': best_f1,
'best_valid_f1_step': best_f1_step,
}
def evaluate(args, label_cfg, test_dataset, model, global_step=0):
## load dataloader
test_dataloader = load_dataloader(
test_dataset, shuffle=False, batch_size=args.eval_batch_size
)
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
logging.info("***** Running evaluation {}*****".format(global_step))
eval_loss = 0.0
nb_eval_steps = 0
total_preds = []
total_labels = []
criterion = LOSS_MAPPING[label_cfg.loss_type]
model.eval()
for batch in tqdm(test_dataloader, desc="Evaluating"):
batch = {key: item.to(args.device) for key, item in batch.items() if type(item) == torch.Tensor}
with torch.no_grad():
net_output = model(**batch)
lprobs = model.get_normalized_probs(
net_output, log_probs=True, loss_type=label_cfg.loss_type,
).contiguous()
probs = model.get_normalized_probs(
net_output, log_probs=False, loss_type=label_cfg.loss_type,
).contiguous()
entropy_loss = criterion(lprobs, batch['label'])
eval_loss += entropy_loss.mean().item()
nb_eval_steps += 1
if label_cfg.loss_type == 'bce':
preds = probs.detach().cpu()
else:
preds = probs.detach().cpu().argmax(axis=1)
labels = batch['label'].detach().cpu()
total_preds.append(preds)
total_labels.append(labels)
total_preds = torch.cat(total_preds)
total_labels = torch.cat(total_labels)
eval_loss = eval_loss/nb_eval_steps
if label_cfg.loss_type == 'bce':
f1, acc, eval_dict = eval_iemocap(label_cfg, total_preds, total_labels)
results = {
'loss': eval_loss,
'accuracy': acc,
'f1_score': f1,
}
results.update(eval_dict)
for key, item in eval_dict.items():
logging.info(" %s = %s", str(key), str(item))
else:
acc = accuracy_score(total_labels,total_preds)
f1 = f1_score(total_labels, total_preds, average="weighted")
results = {
'loss' : eval_loss,
'accuracy' : acc,
'f1_score' : f1,
}
f1, acc, eval_dict = eval_iemocap(label_cfg, total_preds, total_labels)
results.update(eval_dict)
for key, item in eval_dict.items():
logging.info(" %s = %s", str(key), str(item))
# f1, acc, eval_dict =eval_iemocap(config, total_preds, total_labels)
#
# results = {
# "loss": eval_loss,
# "accuracy": acc,
# 'f1_score' : f1,
# }
# results.update(eval_dict)
# for key in sorted(results.keys()):
# logger.info(" %s = %s", key, str(results[key]))
logging.info(" %s = %s", 'loss', str(results['loss']))
logging.info(" %s = %s", 'f1_score', str(results['f1_score']))
logging.info(" %s = %s", 'acc', str(results['accuracy']))
model.train()
return results
def eval_iemocap(label_cfg, results, truths, single=-1):
logging.info("eval with iemocap formulation")
f1s = []
accs = []
save_dict = {}
if label_cfg.loss_type == 'bce':
emos = eval(label_cfg.selected_class)
# if single < 0:
test_preds = results.cpu().detach().numpy()
test_truth = truths.cpu().detach().numpy()
for emo_ind in range(len(emos)):
#logger.info(f"{emos[emo_ind]}: ")
test_preds_i = np.round(test_preds[:, emo_ind])
test_truth_i = test_truth[:, emo_ind]
f1 = f1_score(test_truth_i, test_preds_i, average="weighted")
f1s.append(f1)
acc = accuracy_score(test_truth_i, test_preds_i)
accs.append(acc)
#logger.info(" - F1 Score: %.3f", f1)
#logger.info(" - Accuracy: %.3f", acc)
save_dict['{}_f1'.format(emos[emo_ind])]=f1
save_dict['{}_acc'.format(emos[emo_ind])]=acc
save_dict['{}_count'.format(emos[emo_ind])]=sum(test_preds_i)/sum(test_truth_i)
if label_cfg.loss_type == 'cross':
emos = eval(label_cfg.selected_class)
# if single < 0:
test_preds = results.cpu().detach().numpy()
test_truth = truths.cpu().detach().numpy()
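# np.eye(K)[indices] one-hot encodes the argmax predictions and integer labels, so the per-class loop below can reuse the same binary F1/accuracy logic as the 'bce' branch.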
test_preds = np.eye(len(emos))[test_preds]
test_truth = np.eye(len(emos))[test_truth]
for emo_ind in range(len(emos)):
#logger.info(f"{emos[emo_ind]}: ")
test_preds_i = test_preds[:, emo_ind]
test_truth_i = test_truth[:, emo_ind]
f1 = f1_score(test_truth_i, test_preds_i, average="weighted")
f1s.append(f1)
acc = accuracy_score(test_truth_i, test_preds_i)
accs.append(acc)
#logger.info(" - F1 Score: %.3f", f1)
#logger.info(" - Accuracy: %.3f", acc)
save_dict['{}_f1'.format(emos[emo_ind])]=f1
save_dict['{}_acc'.format(emos[emo_ind])]=acc
save_dict['{}_count'.format(emos[emo_ind])] = sum(test_preds_i) / sum(test_truth_i)
save_dict.update({
"f1_2class" :np.mean(f1s),
"acc_2class": np.mean(accs),
})
return np.mean(f1s), np.mean(accs), save_dict
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 0:
torch.cuda.manual_seed_all(seed)
if __name__ == "__main__":
init()
main()
``` |
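The optimizer setup in `train` above routes parameters into AdamW groups by substring matches on their names; a self-contained sketch of the same pattern on a toy model (the module names are stand-ins for the real encoders):
```python
import torch

model = torch.nn.ModuleDict({
    "w2v_encoder": torch.nn.Linear(4, 4),
    "bert_encoder": torch.nn.Linear(4, 4),
    "classifier": torch.nn.Linear(4, 2),
})
no_decay = ["bias"]
groups = [
    {"params": [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay) and "w2v_encoder" in n],
     "lr": 1e-5, "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay) and "w2v_encoder" not in n],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)],
     "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(groups, lr=3e-4)
print([len(g["params"]) for g in groups])  # [1, 2, 3]
```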
{
"source": "JoungheeKim/uda_pytorch",
"score": 2
} |
#### File: uda_pytorch/src/back_translation.py
```python
import argparse
import torch
import os
import json
import pickle
from nltk.tokenize import sent_tokenize
import nltk
nltk.download('punkt')
import datasets
from tqdm import tqdm
import numpy as np
import random
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 0:
torch.cuda.manual_seed_all(seed)
def clean_web_text(st):
"""
Adapted from UDA official code
https://github.com/google-research/uda/blob/master/text/utils/imdb_format.py
"""
st = st.replace("<br />", " ")
st = st.replace("&quot;", '"')
st = st.replace("<p>", " ")
if "<a href=" in st:
while "<a href=" in st:
start_pos = st.find("<a href=")
end_pos = st.find(">", start_pos)
if end_pos != -1:
st = st[:start_pos] + st[end_pos + 1 :]
else:
st = st[:start_pos] + st[start_pos + len("<a href=")]
st = st.replace("</a>", "")
st = st.replace("\\n", " ")
return st
def split_sent_by_punc(sent, punc_list, max_len):
"""
Adapted from UDA official code
https://github.com/google-research/uda/blob/master/back_translate/split_paragraphs.py
"""
if len(punc_list) == 0 or len(sent) <= max_len:
return [sent]
punc = punc_list[0]
if punc == " " or not punc:
offset = 100
else:
offset = 5
sent_list = []
start = 0
while start < len(sent):
if punc:
pos = sent.find(punc, start + offset)
else:
pos = start + offset
if pos != -1:
sent_list += [sent[start: pos + 1]]
start = pos + 1
else:
sent_list += [sent[start:]]
break
new_sent_list = []
for temp_sent in sent_list:
new_sent_list += split_sent_by_punc(temp_sent, punc_list[1:], max_len)
return new_sent_list
def split_sent(content, max_len):
"""
Adapted from UDA Official code
https://github.com/google-research/uda/blob/master/back_translate/split_paragraphs.py
"""
sent_list = sent_tokenize(content)
new_sent_list = []
split_punc_list = [".", ";", ",", " ", ""]
for sent in sent_list:
new_sent_list += split_sent_by_punc(sent, split_punc_list, max_len)
return new_sent_list, len(new_sent_list)
## batch iteration
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
## save pickle
def save_pickle(path, data):
with open(path, 'wb') as handle:
pickle.dump(data, handle)
def run(args):
## load translator
src2tgt = torch.hub.load("pytorch/fairseq", args.src2tgt_model, tokenizer=args.tokenizer, bpe=args.bpe).to(
args.device).eval()
tgt2src = torch.hub.load("pytorch/fairseq", args.tgt2src_model, tokenizer=args.tokenizer, bpe=args.bpe).to(
args.device).eval()
## load Dataset
imdb_data = datasets.load_dataset('imdb')
data_list = ['train', 'test', 'unsupervised']
for dataname in tqdm(data_list, desc='data name'):
temp_dataset = imdb_data[dataname]
temp_docs = temp_dataset['text']
temp_label = temp_dataset['label']
## clean web tag from text
temp_docs = [clean_web_text(temp_sent) for temp_sent in temp_docs]
new_contents = []
new_contents_length = []
for temp_doc in temp_docs:
new_sents, new_sents_length = split_sent(temp_doc, args.max_len)
new_contents += new_sents
new_contents_length += [new_sents_length]
backtranslated_contents = []
for contents in tqdm(batch(new_contents, args.batch_size), total=int(len(new_contents)/args.batch_size)):
with torch.no_grad():
translated_data = src2tgt.translate(
contents,
sampling=True if args.temperature is not None else False,
temperature=args.temperature,
)
back_translated_data = tgt2src.translate(
translated_data,
sampling=True if args.temperature is not None else False,
temperature=args.temperature,
)
backtranslated_contents += back_translated_data
merge_backtranslated_contents=[]
merge_new_contents = []
cumulate_length = 0
for temp_length in new_contents_length:
merge_backtranslated_contents += [" ".join(backtranslated_contents[cumulate_length:cumulate_length + temp_length])]
merge_new_contents += [" ".join(new_contents[cumulate_length:cumulate_length + temp_length])]
cumulate_length += temp_length
save_data = {
'raw_text' : temp_docs,
'label' : temp_label,
'clean_text' : merge_new_contents,
'backtranslated_text' : merge_backtranslated_contents,
}
save_path = os.path.join(args.save_dir, "{}.p".format(dataname))
save_pickle(save_path, save_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--save_dir",
default=None,
type=str,
required=True,
help="Save Directory",
)
parser.add_argument(
"--src2tgt_model",
default='transformer.wmt19.en-de.single_model',
type=str,
help="torch HUB translation Model(source->target)",
)
parser.add_argument(
"--tgt2src_model",
default='transformer.wmt19.de-en.single_model',
type=str,
help="torch HUB translation Model(target->source)",
)
parser.add_argument(
"--bpe",
default='fastbpe',
type=str,
help="torch HUB translation bpe option",
)
parser.add_argument(
"--tokenizer",
default='moses',
type=str,
help="torch HUB translation tokenizer",
)
parser.add_argument(
"--no_cuda",
action="store_true",
help="if you don't want to use CUDA"
)
parser.add_argument(
"--batch_size",
default=16,
type=int,
help="Back-translation Batch size"
)
parser.add_argument(
"--max_len",
default=300,
type=int,
help="Translation Available length"
)
parser.add_argument(
"--temperature",
default=0.9,
type=float,
help="Translation Available length"
)
parser.add_argument(
"--seed",
default=1,
type=int,
help="Translation Available length"
)
args = parser.parse_args()
args.device = 'cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu'
## make save path
os.makedirs(args.save_dir, exist_ok=True)
## set Seed
set_seed(args.seed)
def _print_config(config):
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(vars(config))
_print_config(args)
run(args)
```
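To see what the recursive splitter above does, a small illustrative call; this reuses `split_sent` from this file, so the punkt tokenizer download at the top is required.
```python
review = ("The film starts slowly. The second act, however, builds tension, "
          "adds new characters, and pays everything off in the finale.")
pieces, n_pieces = split_sent(review, max_len=60)
for piece in pieces:
    print(repr(piece))
```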
#### File: uda_pytorch/src/data_utils.py
```python
import random
import numpy as np
import torch
import pickle
import torch
import os
from torch.utils.data import (
Dataset,
)
from datetime import datetime
import pandas as pd
import logging
logger = logging.getLogger(__name__)
## TEMP NAME
TRAIN_NAME = 'split_train_{}_{}.p'
VALID_NAME = 'split_valid_{}_{}.p'
AUGMENT_NAME = 'split_augment_{}_{}.p'
def save_pickle(path, data):
with open(path, 'wb') as handle:
pickle.dump(data, handle, protocol=4)
def load_pickle(path):
with open(path, 'rb') as handle:
return pickle.load(handle)
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 0:
torch.cuda.manual_seed_all(seed)
class IMDBDataset(Dataset):
def __init__(self, file_path, tokenizer, max_len=512, mlm_flag=False):
assert os.path.isfile(file_path), 'there is no file please check again. [{}]'.format(file_path)
self.max_len = max_len
self.mlm_flag=mlm_flag
dirname, filename = os.path.split(file_path)
cache_filename = "cache_{}".format(filename)
cache_path = os.path.join(dirname, cache_filename)
if os.path.isfile(cache_path):
logger.info("***** load cache dataset [{}] *****".format(cache_path))
label, text, augment_text = load_pickle(cache_path)
else:
logger.info("***** tokenize dataset [{}] *****".format(file_path))
data = load_pickle(file_path)
label = data['label']
text = data['clean_text']
augment_text = data['backtranslated_text']
logger.info("***** dataset size [{}] *****".format(str(len(text))))
augment_text = [tokenizer.convert_tokens_to_ids(tokenizer.tokenize(t)) for t in augment_text]
augment_text = [tokenizer.build_inputs_with_special_tokens(t) for t in augment_text]
text = [tokenizer.convert_tokens_to_ids(tokenizer.tokenize(t)) for t in text]
text = [tokenizer.build_inputs_with_special_tokens(t) for t in text]
## save tokenized file
save_pickle(cache_path, (label, text, augment_text))
self.augment_text = augment_text
self.text = text
self.label = label
def __len__(self):
return len(self.label)
def __getitem__(self, item):
## real text
if len(self.text[item]) > self.max_len:
text = torch.tensor(
[self.text[item][0]]
+ self.text[item][-(self.max_len - 1): -1]
+ [self.text[item][-1]],
dtype=torch.long,
)
else:
text = torch.tensor(self.text[item], dtype=torch.long)
## augmented text
if len(self.augment_text[item]) > self.max_len:
augment_text = torch.tensor(
[self.augment_text[item][0]]
+ self.augment_text[item][-(self.max_len - 1): -1]
+ [self.augment_text[item][-1]],
dtype=torch.long,
)
else:
augment_text = torch.tensor(self.augment_text[item], dtype=torch.long)
## label
label = torch.tensor(self.label[item], dtype=torch.long)
if self.mlm_flag:
return text
return text, augment_text, label
class ResultWriter:
def __init__(self, directory):
self.dir = directory
self.hparams = None
self.load()
self.writer = dict()
def update(self, args, **results):
now = datetime.now()
date = "%s-%s %s:%s" % (now.month, now.day, now.hour, now.minute)
self.writer.update({"date": date})
self.writer.update(results)
self.writer.update(vars(args))
if self.hparams is None:
self.hparams = pd.DataFrame(self.writer, index=[0])
else:
self.hparams = pd.concat([self.hparams, pd.DataFrame([self.writer])], ignore_index=True)
self.save()
def save(self):
assert self.hparams is not None
self.hparams.to_csv(self.dir, index=False)
def load(self):
path = os.path.split(self.dir)[0]
if not os.path.exists(path):
os.makedirs(path)
self.hparams = None
elif os.path.exists(self.dir):
self.hparams = pd.read_csv(self.dir)
else:
self.hparams = None
```
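The truncation in `__getitem__` above keeps the first special token plus a tail window of `max_len - 1` tokens; a standalone sketch of that indexing on a plain id list:
```python
import torch

ids = list(range(20))  # stand-in token ids; ids[0] ~ [CLS], ids[-1] ~ [SEP]
max_len = 8
if len(ids) > max_len:
    out = torch.tensor([ids[0]] + ids[-(max_len - 1):-1] + [ids[-1]], dtype=torch.long)
else:
    out = torch.tensor(ids, dtype=torch.long)
print(out)  # tensor([ 0, 13, 14, 15, 16, 17, 18, 19])
```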
#### File: uda_pytorch/src/divide.py
```python
import argparse
import torch
import os
import pickle
import numpy as np
import random
from sklearn.model_selection import train_test_split
from data_utils import save_pickle, load_pickle, set_seed
## train/valid/augment file name templates
TRAIN_NAME = 'split_train_{}_{}.p'
VALID_NAME = 'split_valid_{}_{}.p'
AUGMENT_NAME = 'split_augment_{}_{}.p'
def split_files(args):
assert os.path.isfile(args.label_file), 'there is no label files, --label_file [{}]'.format(args.label_file)
dirname, filename = os.path.split(args.label_file)
data = load_pickle(args.label_file)
## SPLIT data
train_idx, leftover_idx, _, leftover_label = train_test_split(list(range(len(data['label']))), data['label'],train_size=args.labeled_data_size, stratify=data['label'])
if len(leftover_idx) > args.valid_data_size:
valid_idx, unlabel_idx, _, _ = train_test_split(leftover_idx, leftover_label, train_size=args.valid_data_size, stratify=leftover_label)
else:
valid_idx = leftover_idx
unlabel_idx = []
train_data = dict((key, np.array(item)[train_idx].tolist()) for key, item in zip(data.keys(), data.values()))
valid_data = dict((key, np.array(item)[valid_idx].tolist()) for key, item in zip(data.keys(), data.values()))
unlabel_data = dict((key, np.array(item)[unlabel_idx].tolist()) for key, item in zip(data.keys(), data.values()))
if args.unlabel_file is not None and os.path.isfile(args.unlabel_file):
additional_data = load_pickle(args.unlabel_file)
for key in unlabel_data.keys():
unlabel_data[key] += additional_data[key]
if args.train_file is None:
args.train_file = TRAIN_NAME.format(args.labeled_data_size, args.valid_data_size)
train_path = os.path.join(args.output_dir, args.train_file)
save_pickle(train_path, train_data)
try:
os.remove(os.path.join(args.output_dir, "cache_" + args.train_file))
except OSError:
pass
if args.valid_file is None:
args.valid_file = VALID_NAME.format(args.labeled_data_size, args.valid_data_size)
valid_path = os.path.join(args.output_dir, args.valid_file)
save_pickle(valid_path, valid_data)
try:
os.remove(os.path.join(args.output_dir, "cache_" + args.valid_file))
except OSError:
pass
if args.augment_file is None:
args.augment_file = AUGMENT_NAME.format(args.labeled_data_size, args.valid_data_size)
augment_path = os.path.join(args.output_dir, args.augment_file)
save_pickle(augment_path, unlabel_data)
try:
os.remove(os.path.join(args.output_dir, "cache_" + args.augment_file))
except OSError:
pass
args.train_file = train_path
args.valid_file = valid_path
args.augment_file = augment_path
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="Save Directory",
)
parser.add_argument(
"--label_file",
default=None,
type=str,
required=True,
help="The input labeled data file (pickle).",
)
parser.add_argument(
"--unlabel_file",
default=None,
type=str,
help="The input unlabeled data file (pickle).",
)
parser.add_argument(
"--labeled_data_size",
default=20,
type=int,
help="labeled data size for train",
)
parser.add_argument(
"--valid_data_size",
default=3000,
type=int,
help="validation data size",
)
parser.add_argument(
"--train_file",
default=None,
type=str,
help="name of train file.",
)
parser.add_argument(
"--valid_file",
default=None,
type=str,
help="name of valid file.",
)
parser.add_argument(
"--augment_file",
default=None,
type=str,
help="name of augment file.",
)
parser.add_argument("--seed", type=int, default=9, help="random seed for initialization")
args = parser.parse_args()
## Validation Check
if args.labeled_data_size <= 0:
raise ValueError(
"labeled_data_size must exceed 0. Please check --labeled_data_size options [{}]".format(
args.labeled_data_size
)
)
if args.valid_data_size <= 0:
raise ValueError(
"labeled_data_size must exceed 0. Please check --valid_data_size options [{}]".format(
args.valid_data_size
)
)
if args.label_file is not None:
if not os.path.isfile(args.label_file):
raise ValueError(
"There is no labeled file. Please check --label_file options [{}]".format(
args.label_file
)
)
if args.unlabel_file is not None:
if not os.path.isfile(args.unlabel_file):
raise ValueError(
"There is no unlabeled file. Please check --unlabel_file options [{}]".format(
args.unlabel_file
)
)
## make save path
os.makedirs(args.output_dir, exist_ok=True)
## set Seed
set_seed(args.seed)
def _print_config(config):
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(vars(config))
_print_config(args)
split_files(args)
``` |
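The core of `split_files` above is a two-stage stratified split; a compact sketch with toy labels showing the same train/valid/unlabeled partition:
```python
from sklearn.model_selection import train_test_split

labels = [0] * 10 + [1] * 10
indices = list(range(len(labels)))
train_idx, rest_idx, _, rest_labels = train_test_split(
    indices, labels, train_size=4, stratify=labels)
valid_idx, unlabel_idx, _, _ = train_test_split(
    rest_idx, rest_labels, train_size=6, stratify=rest_labels)
print(len(train_idx), len(valid_idx), len(unlabel_idx))  # 4 6 10
```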
{
"source": "JoungMinJu/2021-2-OSSProj-TwoSeokTwoJoo-3",
"score": 3
} |
#### File: JoungMinJu/2021-2-OSSProj-TwoSeokTwoJoo-3/run.py
```python
from src.game import intro_screen, db
made_by = "TwoSeokTwoJu-3"
def main():
db.init_db()
is_game_quit = intro_screen()
if not is_game_quit:
intro_screen()
if __name__ == "__main__":
main()
```
#### File: 2021-2-OSSProj-TwoSeokTwoJoo-3/src/setting.py
```python
import os
import sys
import random
import pygame
from pygame import display
from pygame import mixer
from pygame import Rect
from pygame import Surface
from pygame import time
from pygame import transform
from pygame.locals import RESIZABLE, RLEACCEL
mixer.pre_init(44100, -16, 2, 2048)
pygame.init()
display.set_caption("Coin-T-REX by_TwoSeokTwoJoo")
gamer_name = ''
scr_size = (width, height) = (800, 400)
FPS = 60
gravity = 0.65
background_col = (235, 235, 235)
black = (0, 0, 0)
white = (255, 255, 255)
green = (0, 200, 0)
orange = (255, 127, 0)
blue = (0, 0, 225)
bright_red = (255, 0, 0)
bright_green = (0, 255, 0)
bright_orange = (255, 215, 0)
red = (255, 18, 18)
dark_red = (237, 0, 0)
deep_red = (201, 0, 0)
dark_blue = (10, 112, 138)
dark_pink = (147, 0, 0)
large_font = pygame.font.Font('DungGeunMo.ttf', 75)
font = pygame.font.Font('DungGeunMo.ttf', 32)
small_font = pygame.font.Font('DungGeunMo.ttf', 25)
xsmall_font = pygame.font.Font('DungGeunMo.ttf', 15)
full_screen = False
monitor_size = (monitor_width, monitor_height) = (display.Info().current_w,
display.Info().current_h)
high_score = 0
resized_screen = display.set_mode((scr_size), RESIZABLE)
screen = resized_screen.copy()
resized_screen_center = (0, 0)
r_width = resized_screen.get_width()
r_height = resized_screen.get_height()
button_offset = 0.2
select_offset = 0.15
width_offset = 0.3
clock = time.Clock()
on_pushtime = 0
off_pushtime = 0
dino_size = [44, 47]
object_size = [40, 40]
ptera_size = [46, 40]
collision_immune_time = 500
shield_time = 2000
speed_up_limit = 700
global bgm_on
bgm_on = True
jump_sound = mixer.Sound('sprites/jump.wav')
die_sound = mixer.Sound('sprites/die.wav')
check_point_sound = mixer.Sound('sprites/checkPoint.wav')
sound_vol = 0.3
# HERE: REMOVE SOUND!!
pygame.mixer.music.load('sprites/t-rex_bgm1.mp3')
pygame.mixer.music.set_volume(sound_vol)
# Function used to draw text inside the game
def draw_text(text, font, surface, x, y, main_color):
text_obj = font.render(text, True, main_color)
text_rect = text_obj.get_rect()
text_rect.centerx = x
text_rect.centery = y
surface.blit(text_obj, text_rect)
def text_objects(text, font):
text_surface = font.render(text, True, (black))
return text_surface, text_surface.get_rect()
# Load an image with transparency
def alpha_image(name, sizex=-1, sizey=-1,color_key=None):
full_name = os.path.join('sprites', name)
img = pygame.image.load(full_name)
if color_key is not None:
if color_key == -1:
color_key = img.get_at((0, 0))
img.set_colorkey(color_key, RLEACCEL)
if sizex != -1 or sizey != -1:
img = transform.scale(img, (sizex, sizey))
img.convert_alpha()
return (img, img.get_rect())
# Function used to load images into the game
def load_image(name, sizex=-1, sizey=-1, color_key=None):
full_name = os.path.join('sprites', name)
img = pygame.image.load(full_name)
img = img.convert()
if color_key is not None:
if color_key == -1:
color_key = img.get_at((0, 0))
img.set_colorkey(color_key, RLEACCEL)
if sizex != -1 or sizey != -1:
img = transform.scale(img, (sizex, sizey))
return (img, img.get_rect())
def load_sprite_sheet(sheet_name, nx, ny,
scalex=-1, scaley=-1, color_key=None):
full_name = os.path.join('sprites', sheet_name)
sheet = pygame.image.load(full_name)
sheet = sheet.convert()
sheet_rect = sheet.get_rect()
sprites = []
sizex = sheet_rect.width / nx
sizey = sheet_rect.height / ny
for i in range(0, ny):
for j in range(0, nx):
rect = Rect((j * sizex, i * sizey, sizex, sizey))
# Rect((left, top), (width, height)) -> Rect
img = Surface(rect.size)
img = img.convert()
img.blit(sheet, (0, 0), rect)
if color_key is not None:
if color_key == -1:
color_key = img.get_at((0, 0))
img.set_colorkey(color_key, RLEACCEL)
if scalex != -1 or scaley != -1:
img = transform.scale(img, (scalex, scaley))
sprites.append(img)
sprite_rect = sprites[0].get_rect()
return sprites, sprite_rect
def disp_gameover_msg(gameover_img):
gameover_rect = gameover_img.get_rect()
gameover_rect.centerx = width / 2
gameover_rect.centery = height * 0.35
screen.blit(gameover_img, gameover_rect)
def disp_store_buttons(btn_restart, btn_save, btn_exit, btn_back):
btn_restart_rect = btn_restart.get_rect()
btn_save_rect = btn_save.get_rect()
btn_exit_rect = btn_exit.get_rect()
btn_back_rect = btn_back.get_rect()
btn_restart_rect.centerx = width * 0.2
btn_save_rect.centerx = width * (0.2 + width_offset)
btn_exit_rect.centerx = width * (0.2 + 2 * width_offset)
btn_back_rect.centerx = width * 0.1
btn_restart_rect.centery = height * 0.5
btn_save_rect.centery = height * 0.5
btn_exit_rect.centery = height * 0.5
btn_back_rect.centery = height * 0.1
screen.blit(btn_restart, btn_restart_rect)
screen.blit(btn_save, btn_save_rect)
screen.blit(btn_exit, btn_exit_rect)
screen.blit(btn_back, btn_back_rect)
def disp_gameover_buttons(btn_restart, btn_save, btn_exit):
btn_restart_rect = btn_restart.get_rect()
btn_save_rect = btn_save.get_rect()
btn_exit_rect = btn_exit.get_rect()
btn_restart_rect.centerx = width * 0.25
btn_save_rect.centerx = width * 0.5
btn_exit_rect.centerx = width * 0.75
btn_restart_rect.centery = height * 0.6
btn_save_rect.centery = height * 0.6
btn_exit_rect.centery = height * 0.6
screen.blit(btn_restart, btn_restart_rect)
screen.blit(btn_save, btn_save_rect)
screen.blit(btn_exit, btn_exit_rect)
def disp_pvp_gameover_buttons(btn_restart, btn_exit):
btn_restart_rect = btn_restart.get_rect()
btn_exit_rect = btn_exit.get_rect()
btn_restart_rect.centerx = width * 0.35
btn_exit_rect.centerx = width * 0.65
btn_restart_rect.centery = height * 0.55
btn_exit_rect.centery = height * 0.55
screen.blit(btn_restart, btn_restart_rect)
screen.blit(btn_exit, btn_exit_rect)
def disp_pvp_winner_loser(player1):
win = large_font.render("WIN", True, black)
lose = large_font.render("LOSE", True, black)
if not player1.is_dead:
win_width = width * 0.17
lose_width = width * 0.70
else:
win_width = width * 0.70
lose_width = width * 0.17
screen.blit(win, (win_width, height * 0.2))
screen.blit(lose, (lose_width, height * 0.2))
def disp_intro_buttons(btn_1p, btn_2p, btn_board, btn_option):
btn_1p_rect = btn_1p.get_rect()
btn_2p_rect = btn_2p.get_rect()
btn_board_rect = btn_board.get_rect()
btn_option_rect = btn_option.get_rect()
btn_1p_rect.centerx = width * 0.8
btn_2p_rect.centerx = width * 0.8
btn_board_rect.centerx = width * 0.8
btn_option_rect.centerx = width * 0.8
btn_1p_rect.centery = height * 0.25
btn_2p_rect.centery = height * (0.25 + 0.75 * button_offset)
btn_board_rect.centery = height * (0.25 + 1.5 * button_offset)
btn_option_rect.centery = height * (0.25 + 2.25 * button_offset)
screen.blit(btn_1p, btn_1p_rect)
screen.blit(btn_2p, btn_2p_rect)
screen.blit(btn_board, btn_board_rect)
screen.blit(btn_option, btn_option_rect)
def disp_select_buttons(btn_easy, btn_hard, btn_store, btn_set, btn_back):
btn_easy_rect = btn_easy.get_rect()
btn_hard_rect = btn_hard.get_rect()
btn_store_rect = btn_store.get_rect()
btn_set_rect = btn_set.get_rect()
btn_back_rect = btn_back.get_rect()
btn_easy_rect.centerx = width * 0.5
btn_hard_rect.centerx = width * 0.5
btn_store_rect.centerx = width * 0.5
btn_set_rect.centerx = width * 0.5
btn_back_rect.centerx = width * 0.1
btn_easy_rect.centery = height * 0.26
btn_hard_rect.centery = height * (0.26 + select_offset)
btn_store_rect.centery = height * (0.26 + 2 * select_offset)
btn_set_rect.centery = height * (0.26 + 3 * select_offset)
btn_back_rect.centery = height * 0.1
screen.blit(btn_easy, btn_easy_rect)
screen.blit(btn_hard, btn_hard_rect)
screen.blit(btn_store, btn_store_rect)
screen.blit(btn_set, btn_set_rect)
screen.blit(btn_back, btn_back_rect)
def check_scr_size(eventw, eventh):
if (eventw < width and eventh < height) or (eventw < width) or (eventh < height):
# minimum resolution
resized_screen = display.set_mode((scr_size), RESIZABLE)
else:
if (eventw / eventh) != (width / height):
# fixed aspect ratio
adjusted_height = int(eventw / (width / height))
resized_screen = display.set_mode((eventw, adjusted_height), RESIZABLE)
def full_screen_issue():
global scr_size
resized_screen = display.set_mode((scr_size), RESIZABLE)
resized_screen = display.set_mode((scr_size), RESIZABLE)
def extract_digits(number):
if number > -1:
digits = []
i = 0
while ((number / 10) != 0):
digits.append(number % 10)
number = int(number / 10)
digits.append(number % 10)
for i in range(len(digits), 5):
digits.append(0)
digits.reverse()
return digits
def resize(name, w, h, color):
global width, height, resized_screen
print("resized_screen: (", resized_screen.get_width(),
",", resized_screen.get_height(), ")")
return (name, w * resized_screen.get_width() // width,
h * resized_screen.get_height() // height, color)
def text_size(size):
font = pygame.font.Font('DungGeunMo.ttf', size)
return font
def score_board(btn_back):
btn_back_rect = btn_back.get_rect()
btn_back_rect.centerx = width * 0.1
btn_back_rect.centery = height * 0.1
screen.blit(btn_back, btn_back_rect)
``` |
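The sprite-sheet loader above slices an `nx` by `ny` grid of equally sized frames; a minimal sketch of just the slicing arithmetic (sheet dimensions are illustrative):
```python
# Frame rectangles for a 4x2 sheet of 256x128 pixels.
nx, ny, sheet_w, sheet_h = 4, 2, 256, 128
sizex, sizey = sheet_w / nx, sheet_h / ny
rects = [(j * sizex, i * sizey, sizex, sizey) for i in range(ny) for j in range(nx)]
print(rects[0], rects[-1])  # (0.0, 0.0, 64.0, 64.0) (192.0, 64.0, 64.0, 64.0)
```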
{
"source": "jounikuj/snakemake-qiaseq-mirna",
"score": 3
} |
#### File: workflow/scripts/merge_read_counts.py
```python
import pandas as pd
def merge_read_counts(input_files, output_file):
"""
Merges sample-specific RNA counts into one .tsv file.
First column contains the RNA identifier and rest of
the columns represent individual samples.
"""
# Loop through each input file.
for i in range(0, len(input_files)):
# Print status message.
print("Reading counts from {}...".format(input_files[i]))
# Import count table as pd.DataFrame,
# use first column as an index.
df = pd.read_table(input_files[i], index_col="Molecule")
# Extract sample ID from file path.
# Rename second column according to sample ID.
sample_id = input_files[i].split("/")[1]
df = df.rename(columns={"Count":sample_id})
if i == 0:
# If this is the first count table to iterate over, use it as a
# template for merged count table.
merged_df = df
else:
# If iterated count table is not the first one, join table with
# the pre-existing merged table.
merged_df = merged_df.join(df, how="outer")
# Fill empty values (no expression) with zero.
merged_df = merged_df.fillna(0)
# Write merged pd.DataFrame to tab-delimited file.
merged_df.to_csv(output_file[0], sep="\t")
# Call method.
merge_read_counts(snakemake.input, snakemake.output)
``` |
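A small worked example of the outer join and zero-fill performed above, with two hand-built per-sample count tables (sample and molecule names are illustrative):
```python
import pandas as pd

a = pd.DataFrame({"Count": [5, 2]},
                 index=pd.Index(["miR-21", "miR-155"], name="Molecule"))
b = pd.DataFrame({"Count": [7]},
                 index=pd.Index(["miR-155"], name="Molecule"))
merged = (a.rename(columns={"Count": "sampleA"})
           .join(b.rename(columns={"Count": "sampleB"}), how="outer")
           .fillna(0))
print(merged)  # miR-21 appears with sampleB == 0.0
```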
{
"source": "JouniKuusisto/ecg_preprocessing",
"score": 3
} |
#### File: JouniKuusisto/ecg_preprocessing/ecg.py
```python
import wfdb
import os
import pandas as pd
import numpy as np
import ast
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
#path of locally saved dataset
PATH="C:/Users/Jouni/Datasets/ptb-xl-a-large-publicly-available-electrocardiography-dataset-1.0.1/"
#following database-file contains human interpretations of the ECG, including information on rhythm during the ECG recording
y_df = pd.read_csv(PATH+"ptbxl_database.csv", index_col = "ecg_id")
y_df["scp_codes"] = y_df["scp_codes"].apply(lambda x: ast.literal_eval(x))
n_afib = 0
n_sr = 0
#for this ML project, the relevant label information is gathered from the "scp_codes" column of "ptbxl_database.csv"
y_df["normal_ecg"] = y_df.scp_codes.apply(lambda x: int("NORM" in x.keys()))
y_df["normal_rhythm"] = y_df.scp_codes.apply(lambda x: int("SR" in x.keys()))
y_df["atrial_fibrillation"] = y_df.scp_codes.apply(lambda x: int("AFIB" in x.keys()))
#the following computes the FFT of the summed ECG signal: all channels are added together before the FFT is performed
def karvalakki_ft(file, n_peaks = False, plot = True, raja = False, limit = False):
record = wfdb.rdrecord(file)
signaali = record.p_signal
if limit:
signaali = signaali[:limit,:]
summasignaali = np.sum(signaali, axis = 1)
ft = np.fft.rfft(summasignaali)
abs_ft = np.abs(ft)
power_spectrum = np.square(abs_ft)
frequency = np.linspace(0, record.fs/2, len(power_spectrum))
power_spectrum[0:5] = 0 # we dont need baseline
peaks = find_peaks(power_spectrum, height=np.max(power_spectrum)/200, distance=5)
if n_peaks:
sorted_indices = peaks[1]["peak_heights"].argsort()
indices = np.flip(sorted_indices[-n_peaks:])
peak_magnitudes = peaks[1]["peak_heights"][indices]
peak_frequencies = peaks[0][indices]
else:
sorted_indices = peaks[1]["peak_heights"].argsort()
indices = np.flip(sorted_indices)
peak_magnitudes = peaks[1]["peak_heights"][indices]
peak_frequencies = peaks[0][indices]
if plot:
if raja:
plt.plot(frequency[0:raja], power_spectrum[0:raja])
plt.scatter(peak_frequencies/10, peak_magnitudes, c = "r")  # divide by 10 because each frequency bin is ~0.1 Hz wide
plt.ylabel("amplitude")
plt.xlabel("frequency (Hz)")
plt.xlim(0,50)
#plt.yscale("log")
plt.show()
else:
plt.plot(frequency, power_spectrum)
plt.scatter(peak_frequencies/10, peak_magnitudes, c = "r")  # divide by 10 because each frequency bin is ~0.1 Hz wide
plt.ylabel("amplitude")
plt.xlabel("frequency (Hz)")
plt.xlim(0,50)
#plt.yscale("log")
plt.show()
return (power_spectrum, frequency, peak_magnitudes, peak_frequencies/10)
#for ECG visualization
def ecg_plot(file, summed = False):
record = wfdb.rdrecord(file)
signaali = record.p_signal
if summed:
signaali = np.sum(signaali, axis = 1)
plt.plot(signaali)
#this creates and returns the dataset: datapoints will be FFT data and labels are either normal "0" or atrial fibrillation "1"
def create_dataset(): # af == 1 and norm == 0
af_df = y_df[y_df.atrial_fibrillation == 1]
norm_df = y_df[y_df.normal_rhythm == 1]
m = len(af_df)+len(norm_df)
X = np.zeros((m, 20)) # rows will have 20 features: 10 highest amplitude peaks and their frequency
y = np.zeros(m)
for i in range(len(af_df)):
_ , _ , peak_magnitudes, peak_frequencies = karvalakki_ft(PATH + af_df.iloc[i].filename_hr, n_peaks = False, plot = False)
padded_peak = np.zeros(10)
padded_freq = np.zeros(10)
padded_peak[:peak_magnitudes.shape[0]] = peak_magnitudes[:10]
padded_freq[:peak_frequencies.shape[0]] = peak_frequencies[:10]
X[i,:10] = padded_peak
X[i,10:] = padded_freq
y[i] = 1
print(i)
print("af") #printing the iterator to get follow the progress
for i in range(len(norm_df)):
_ , _ , peak_magnitudes, peak_frequencies = karvalakki_ft(PATH + norm_df.iloc[i].filename_hr, n_peaks = False, plot = False)
padded_peak = np.zeros(10)
padded_freq = np.zeros(10)
padded_peak[:peak_magnitudes.shape[0]] = peak_magnitudes[:10]
padded_freq[:peak_frequencies.shape[0]] = peak_frequencies[:10]
X[i+len(af_df),:10] = padded_peak
X[i+len(af_df),10:] = padded_freq
y[i+len(af_df)] = 0
print(i)
print("norm")
return (X, y)
#by inspecting all 10 max frequencies, frequencies >= 50 could be discarded
#logarithmic transformation of features to be assessed
X, y = create_dataset()
tallennus = np.zeros((18296, 21))
tallennus[:,:-1] = X
tallennus[:,-1] = y
pd.DataFrame(tallennus).to_csv("dataset_all_freq.csv", header = False, index = False)
``` |
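The essence of `karvalakki_ft` above is a power spectrum with peak picking; a self-contained sketch on a synthetic two-tone signal (sampling rate and tone frequencies are illustrative, chosen so the bins are 0.1 Hz wide as the code above assumes):
```python
import numpy as np
from scipy.signal import find_peaks

fs = 500                                  # Hz; 10 s of signal -> 0.1 Hz bins
t = np.arange(0, 10, 1 / fs)
sig = np.sin(2 * np.pi * 1.2 * t) + 0.5 * np.sin(2 * np.pi * 7.0 * t)

spectrum = np.square(np.abs(np.fft.rfft(sig)))
spectrum[:5] = 0                          # drop the baseline, as above
freqs = np.fft.rfftfreq(len(sig), d=1 / fs)
peaks, _ = find_peaks(spectrum, height=spectrum.max() / 200, distance=5)
print(freqs[peaks])                       # approximately [1.2, 7.0]
```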
{
"source": "jounile/nollanet",
"score": 3
} |
#### File: tests/functional/test_change_password.py
```python
import pytest
from requests import get
from urllib.parse import urljoin
def test_change_password_page(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/auth/pwdreset' page is navigated to (GET)
THEN check the response is valid and page title is correct
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/auth/pwdreset'))
assert response.status_code == 200
assert '<h2>Change password</h2>' in response.text
```
#### File: tests/functional/test_media.py
```python
import pytest
from requests import get
from urllib.parse import urljoin
def test_new_media_page(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/media/newmedia' page is navigated to (GET)
THEN check the response is valid and page title is correct
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/media/newmedia'))
assert response.status_code == 200
assert '<h1>New media</h1>' in response.text
def test_update_media_page(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/media/update/1' page is navigated to (GET)
THEN check the response is valid and page title is correct
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/media/update/1'))
assert response.status_code == 200
assert '<h1>Update media</h1>' in response.text
def test_hidden_photo(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/media/newmedia' form is submitted to create a story (POST)
THEN check the response is valid and flash message is correct
"""
new_hidden_media = dict(mediatype_id=1, genre_id=1, storytype_id=2, country_id=1, media_topic='New topic', media_desc='description', media_text='Text content', hidden=1)
request_session, api_url = wait_for_api
response = request_session.post(urljoin(api_url, '/media/newmedia'), data=new_hidden_media, allow_redirects=True)
assert response.status_code == 200
assert '<div class="flash">New media added</div>' in response.text
def test_valid_photo(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/media/newmedia' form is submitted to create a story (POST)
THEN check the response is valid and flash message is correct
"""
new_media = dict(mediatype_id=1, genre_id=1, storytype_id=4, country_id=1, media_topic='New photo topic', media_desc='Description', media_text='Text content', hidden=None)
request_session, api_url = wait_for_api
response = request_session.post(urljoin(api_url, '/media/newmedia'), data=new_media, allow_redirects=True)
assert response.status_code == 200
assert '<div class="flash">New media added</div>' in response.text
```
#### File: tests/functional/test_news.py
```python
from requests import get
from urllib.parse import urljoin
def test_news_page(wait_for_api):
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/news/all'))
assert response.status_code == 200
assert "<h1>News</h1>" in response.text
```
#### File: tests/functional/test_uploads.py
```python
import io
import pytest
from requests import get
from urllib.parse import urljoin
def test_my_uploads_page(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/my/uploads' page is navigated to (GET)
THEN check the response is valid and page title is correct
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/my/uploads'))
assert response.status_code == 200
assert '<h1>My uploads</h1>' in response.text
def test_valid_new_upload_page(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/media/newupload' page is navigated to (GET)
THEN check the response is valid and page title is correct
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/media/newupload'))
assert response.status_code == 200
assert '<h1>New upload</h1>' in response.text
def test_invalid_new_upload_page(wait_for_api):
"""
GIVEN a user has not logged in
WHEN the '/media/newupload' page is navigated to (GET)
THEN check the response is valid and page title is correct
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/media/newupload'))
assert response.status_code == 200
assert '<div class="flash">Please login first</div>' in response.text
def test_new_upload(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/media/newupload' page is posted an example image (POST)
THEN check the response is valid and the page title is correct
"""
example_file=open("./app/static/gfx/example.png","rb")
files = { 'file': example_file }
request_session, api_url = wait_for_api
response = request_session.post(urljoin(api_url, '/media/newupload'), files=files, allow_redirects=True)
assert response.status_code == 200
assert '<h1>My uploads</h1>' in response.text
#def test_remove_upload(wait_for_api, login_user):
# """
# GIVEN a user has logged in (login_user)
# WHEN the '/blob/delete' page is posted (POST)
# THEN check the response is valid and the user is logged in
# """
# valid_blob = dict(blob_path='images/*example.png', upload_id=2)
# request_session, api_url = wait_for_api
# response = request_session.post(urljoin(api_url, '/blob/delete'), data=valid_blob, allow_redirects=True)
# assert response.status_code == 200
# assert 'example.png was deleted successfully' in response.text
```
#### File: tests/functional/test_youtube.py
```python
from requests import get
from urllib.parse import urljoin
# Requires google API keys to work.
# Use testing configuration
def test_youtube_page(wait_for_api):
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/youtube/playlists'))
assert response.status_code == 200
assert "<h1>Playlists</h1>" in response.text
``` |
{
"source": "jouniluoma/ner-cmv-tagger",
"score": 2
} |
#### File: jouniluoma/ner-cmv-tagger/ner-cmv-tagger.py
```python
import os
import sys
import re
import numpy as np
from collections import deque
import functools
from multiprocessing import Pool
from multiprocessing import cpu_count
import tensorflow as tf
from common import load_ner_model, argument_parser
from pubmeddb import stream_documents, get_word_labels, tags_to_spans, write_sentences, writespans, create_samples
import datetime
def main(argv):
argparser = argument_parser()
args = argparser.parse_args(argv[1:])
infn = args.input_data
outfn = args.output_spans
out_tsv = args.output_tsv
ner_model, tokenizer, labels, config = load_ner_model(args.ner_model_dir)
ner_model.trainable = False
ner_model.training = False
seq_len = config['max_seq_length']
batch_size = args.batch_size
tag_map = { l: i for i, l in enumerate(labels) }
inv_tag_map = { v: k for k, v in tag_map.items() }
print("Preprocessing and inference starts: ", datetime.datetime.now(),flush=True)
os.makedirs(os.path.dirname(outfn), exist_ok=True)
with open(outfn, 'w+') as of:
print("CPU count ", cpu_count())
partial_create_documents = functools.partial(create_samples,tokenizer,seq_len)
with Pool(cpu_count()-1) as p:
input_docs = p.map(partial_create_documents,stream_documents(infn))
print("input docs len", len(input_docs), flush=True)
input_sentences=[]
data_list = []
num_input_sentences = 0
tok_start = 0
for count,document in enumerate(input_docs):
num_input_sentences+=len(document.tids)
data_list.append(document.data)
input_sentences.append((document.doc_id, num_input_sentences, document.text)) #Sentences per doc for writing spans
if num_input_sentences > args.sentences_on_batch:
print("num input sentences ", num_input_sentences)
print("Tok start ",tok_start)
print("count ", count)
toks = np.array([sample for samples in input_docs[tok_start:count+1] for sample in samples.tids])
seqs = np.array([sample for samples in input_docs[tok_start:count+1] for sample in samples.sids])
print("toks shape ", toks.shape)
print("seqs shape ", seqs.shape)
tok_start = count+1
print("Inference starts: ", datetime.datetime.now(),flush=True)
print(num_input_sentences, datetime.datetime.now(),flush=True)
probs = ner_model.predict((toks, seqs),batch_size=batch_size)
preds = np.argmax(probs, axis=-1)
start = 0
print("Postprocess starts: ", datetime.datetime.now(),flush=True)
for data, indices in zip(data_list, input_sentences):
token_labels=[]
for i, pred in enumerate(preds[start:indices[1]]):
token_labels.append([inv_tag_map[p] for p in pred[1:len(data.tokens[i])+1]])
start=indices[1]
word_labels = get_word_labels(
data.words, data.lengths, data.tokens, token_labels)
# Flatten and map to typed spans with offsets
with open(out_tsv,'a+') as outputfile:
write_sentences(outputfile, data.words, word_labels)
word_sequence = [w for s in data.words for w in s]
tag_sequence = [t for s in word_labels for t in s]
spans = tags_to_spans(indices[2], word_sequence, tag_sequence)
writespans(of, indices[0], spans)
input_sentences=[]
data_list =[]
num_input_sentences=0
toks = np.array([], dtype=np.int64).reshape(0,seq_len)
seqs = np.array([], dtype=np.int64).reshape(0,seq_len)
of.flush()
print("preprocess starts: ", datetime.datetime.now(),flush=True)
if input_sentences:
toks = np.array([sample for samples in input_docs[tok_start:] for sample in samples.tids])
seqs = np.array([sample for samples in input_docs[tok_start:] for sample in samples.sids])
print("Inference starts: ", datetime.datetime.now(),flush=True)
print(num_input_sentences, datetime.datetime.now(),flush=True)
probs = ner_model.predict((toks, seqs),batch_size=batch_size)
preds = np.argmax(probs, axis=-1)
start = 0
for data, indices in zip(data_list, input_sentences):
token_labels=[]
for i, pred in enumerate(preds[start:indices[1]]):
token_labels.append([inv_tag_map[p] for p in pred[1:len(data.tokens[i])+1]])
start=indices[1]
word_labels = get_word_labels(
data.words, data.lengths, data.tokens, token_labels)
with open(out_tsv,'a+') as outputfile:
write_sentences(outputfile, data.words, word_labels)
# Flatten and map to typed spans with offsets
word_sequence = [w for s in data.words for w in s]
tag_sequence = [t for s in word_labels for t in s]
spans = tags_to_spans(indices[2], word_sequence, tag_sequence)
writespans(of, indices[0], spans)
print("inference ends: ", datetime.datetime.now(),flush=True)
if __name__ == '__main__':
sys.exit(main(sys.argv))
``` |
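`tags_to_spans` is imported from `pubmeddb` and not shown here; as a simplified stand-in (not the actual implementation), the BIO-to-span idea it relies on looks like this:
```python
def bio_to_spans(tags):
    """Simplified BIO decoder: collect (start_idx, end_idx, type) spans."""
    spans, start, etype = [], None, None
    for i, tag in enumerate(tags + ["O"]):      # sentinel flushes the last span
        if (tag == "O" or tag.startswith("B-")) and start is not None:
            spans.append((start, i - 1, etype))
            start, etype = None, None
        if tag.startswith("B-"):
            start, etype = i, tag[2:]
    return spans

print(bio_to_spans(["B-Gene", "O", "O", "B-Disease", "I-Disease"]))
# [(0, 0, 'Gene'), (3, 4, 'Disease')]
```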
{
"source": "JouniVatanen/NLP-and-Deep-Learning",
"score": 3
} |
#### File: src/ab_testing/server_starter.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
from flask import Flask, jsonify, request
from scipy.stats import beta
# create an app
app = Flask(__name__)
# define bandits
# there's no "pull arm" here
# since that's technically now the user/client
class Bandit:
def __init__(self, name):
self.name = name
def sample(self):
# TODO
return 1
# TODO - what else does the Bandit need to do?
# initialize bandits
banditA = Bandit('A')
banditB = Bandit('B')
@app.route('/get_ad')
def get_ad():
# TODO
return jsonify({'advertisement_id': 'A'})
@app.route('/click_ad', methods=['POST'])
def click_ad():
result = 'OK'
if request.form['advertisement_id'] == 'A':
# TODO
pass
elif request.form['advertisement_id'] == 'B':
# TODO
pass
else:
result = 'Invalid Input.'
# nothing to return really
return jsonify({'result': result})
if __name__ == '__main__':
app.run(host='127.0.0.1', port='8888')
```
#### File: src/ann_class2/adam.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from util import get_normalized_data, error_rate, cost, y2indicator
from mlp import forward, derivative_w2, derivative_w1, derivative_b2, derivative_b1
def main():
max_iter = 10
print_period = 10
Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
reg = 0.01
Ytrain_ind = y2indicator(Ytrain)
Ytest_ind = y2indicator(Ytest)
N, D = Xtrain.shape
batch_sz = 500
n_batches = N // batch_sz
M = 300
K = 10
W1_0 = np.random.randn(D, M) / np.sqrt(D)
b1_0 = np.zeros(M)
W2_0 = np.random.randn(M, K) / np.sqrt(M)
b2_0 = np.zeros(K)
W1 = W1_0.copy()
b1 = b1_0.copy()
W2 = W2_0.copy()
b2 = b2_0.copy()
# 1st moment
mW1 = 0
mb1 = 0
mW2 = 0
mb2 = 0
# 2nd moment
vW1 = 0
vb1 = 0
vW2 = 0
vb2 = 0
# hyperparams
lr0 = 0.001
beta1 = 0.9
beta2 = 0.999
eps = 1e-8
# 1. Adam
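  # Adam update (as implemented below):
  #   m <- beta1*m + (1-beta1)*g        (1st moment / momentum)
  #   v <- beta2*v + (1-beta2)*g^2      (2nd moment / scaling)
  #   m_hat = m / (1 - beta1^t),  v_hat = v / (1 - beta2^t)   (bias correction)
  #   param <- param - lr * m_hat / sqrt(v_hat + eps)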
loss_adam = []
err_adam = []
t = 1
for i in range(max_iter):
for j in range(n_batches):
Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),]
Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),]
pYbatch, Z = forward(Xbatch, W1, b1, W2, b2)
# updates
# gradients
gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2
gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2
gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1
gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1
# new m
mW1 = beta1 * mW1 + (1 - beta1) * gW1
mb1 = beta1 * mb1 + (1 - beta1) * gb1
mW2 = beta1 * mW2 + (1 - beta1) * gW2
mb2 = beta1 * mb2 + (1 - beta1) * gb2
# new v
vW1 = beta2 * vW1 + (1 - beta2) * gW1 * gW1
vb1 = beta2 * vb1 + (1 - beta2) * gb1 * gb1
vW2 = beta2 * vW2 + (1 - beta2) * gW2 * gW2
vb2 = beta2 * vb2 + (1 - beta2) * gb2 * gb2
# bias correction
correction1 = 1 - beta1 ** t
hat_mW1 = mW1 / correction1
hat_mb1 = mb1 / correction1
hat_mW2 = mW2 / correction1
hat_mb2 = mb2 / correction1
correction2 = 1 - beta2 ** t
hat_vW1 = vW1 / correction2
hat_vb1 = vb1 / correction2
hat_vW2 = vW2 / correction2
hat_vb2 = vb2 / correction2
# update t
t += 1
# apply updates to the params
W1 = W1 - lr0 * hat_mW1 / np.sqrt(hat_vW1 + eps)
b1 = b1 - lr0 * hat_mb1 / np.sqrt(hat_vb1 + eps)
W2 = W2 - lr0 * hat_mW2 / np.sqrt(hat_vW2 + eps)
b2 = b2 - lr0 * hat_mb2 / np.sqrt(hat_vb2 + eps)
if j % print_period == 0:
pY, _ = forward(Xtest, W1, b1, W2, b2)
l = cost(pY, Ytest_ind)
loss_adam.append(l)
print("Cost at iteration i=%d, j=%d: %.6f" % (i, j, l))
err = error_rate(pY, Ytest)
err_adam.append(err)
print("Error rate:", err)
pY, _ = forward(Xtest, W1, b1, W2, b2)
print("Final error rate:", error_rate(pY, Ytest))
# 2. RMSprop with momentum
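  # RMSprop-with-momentum update (as implemented below):
  #   cache <- decay*cache + (1-decay)*g^2
  #   d <- mu*d + (1-mu) * lr * g / (sqrt(cache) + eps)
  #   param <- param - d
  # note the cache is initialized to 1 (not 0) to avoid dividing by tiny values early on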
W1 = W1_0.copy()
b1 = b1_0.copy()
W2 = W2_0.copy()
b2 = b2_0.copy()
loss_rms = []
err_rms = []
# comparable hyperparameters for fair comparison
lr0 = 0.001
mu = 0.9
decay_rate = 0.999
eps = 1e-8
# rmsprop cache
cache_W2 = 1
cache_b2 = 1
cache_W1 = 1
cache_b1 = 1
# momentum
dW1 = 0
db1 = 0
dW2 = 0
db2 = 0
for i in range(max_iter):
for j in range(n_batches):
Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),]
Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),]
pYbatch, Z = forward(Xbatch, W1, b1, W2, b2)
# updates
gW2 = derivative_w2(Z, Ybatch, pYbatch) + reg*W2
cache_W2 = decay_rate*cache_W2 + (1 - decay_rate)*gW2*gW2
dW2 = mu * dW2 + (1 - mu) * lr0 * gW2 / (np.sqrt(cache_W2) + eps)
W2 -= dW2
gb2 = derivative_b2(Ybatch, pYbatch) + reg*b2
cache_b2 = decay_rate*cache_b2 + (1 - decay_rate)*gb2*gb2
db2 = mu * db2 + (1 - mu) * lr0 * gb2 / (np.sqrt(cache_b2) + eps)
b2 -= db2
gW1 = derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1
cache_W1 = decay_rate*cache_W1 + (1 - decay_rate)*gW1*gW1
dW1 = mu * dW1 + (1 - mu) * lr0 * gW1 / (np.sqrt(cache_W1) + eps)
W1 -= dW1
gb1 = derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1
cache_b1 = decay_rate*cache_b1 + (1 - decay_rate)*gb1*gb1
db1 = mu * db1 + (1 - mu) * lr0 * gb1 / (np.sqrt(cache_b1) + eps)
b1 -= db1
if j % print_period == 0:
pY, _ = forward(Xtest, W1, b1, W2, b2)
l = cost(pY, Ytest_ind)
loss_rms.append(l)
print("Cost at iteration i=%d, j=%d: %.6f" % (i, j, l))
err = error_rate(pY, Ytest)
err_rms.append(err)
print("Error rate:", err)
pY, _ = forward(Xtest, W1, b1, W2, b2)
print("Final error rate:", error_rate(pY, Ytest))
plt.plot(loss_adam, label='adam')
plt.plot(loss_rms, label='rmsprop')
plt.legend()
plt.show()
if __name__ == '__main__':
main()
```
#### File: src/ann_class2/batch_norm_theano.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
from theano.tensor.nnet.bn import batch_normalization_train, batch_normalization_test
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from util import get_normalized_data
def init_weight(M1, M2):
return np.random.randn(M1, M2) * np.sqrt(2.0 / M1)
class HiddenLayerBatchNorm(object):
def __init__(self, M1, M2, f):
self.M1 = M1
self.M2 = M2
self.f = f
W = init_weight(M1, M2)
gamma = np.ones(M2)
beta = np.zeros(M2)
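    # gamma (scale) and beta (shift) are the learnable BN parameters:
    # out = gamma * (a - mean) / sqrt(var + eps) + beta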
self.W = theano.shared(W)
self.gamma = theano.shared(gamma)
self.beta = theano.shared(beta)
self.params = [self.W, self.gamma, self.beta]
# for test time
# self.running_mean = T.zeros(M2)
# self.running_var = T.zeros(M2)
self.running_mean = theano.shared(np.zeros(M2))
self.running_var = theano.shared(np.zeros(M2))
def forward(self, X, is_training):
activation = X.dot(self.W)
if is_training:
# returns:
# batch-normalized output
# batch mean
      # batch inverse standard deviation
# running mean (for later use as population mean estimate)
# running var (for later use as population var estimate)
out, batch_mean, batch_invstd, new_running_mean, new_running_var = batch_normalization_train(
activation,
self.gamma,
self.beta,
running_mean=self.running_mean,
running_var=self.running_var,
)
self.running_update = [
(self.running_mean, new_running_mean),
(self.running_var, new_running_var),
]
# if you don't trust the built-in bn function
# batch_var = 1 / (batch_invstd * batch_invstd)
# self.running_update = [
# (self.running_mean, 0.9*self.running_mean + 0.1*batch_mean),
# (self.running_var, 0.9*self.running_var + 0.1*batch_var),
# ]
else:
out = batch_normalization_test(
activation,
self.gamma,
self.beta,
self.running_mean,
self.running_var
)
return self.f(out)
class HiddenLayer(object):
def __init__(self, M1, M2, f):
self.M1 = M1
self.M2 = M2
self.f = f
W = init_weight(M1, M2)
b = np.zeros(M2)
self.W = theano.shared(W)
self.b = theano.shared(b)
self.params = [self.W, self.b]
def forward(self, X):
return self.f(X.dot(self.W) + self.b)
class ANN(object):
def __init__(self, hidden_layer_sizes):
self.hidden_layer_sizes = hidden_layer_sizes
def fit(self, X, Y, Xtest, Ytest, activation=T.nnet.relu, learning_rate=1e-2, mu=0.9, epochs=15, batch_sz=100, print_period=100, show_fig=True):
X = X.astype(np.float32)
Y = Y.astype(np.int32)
# initialize hidden layers
N, D = X.shape
self.layers = []
M1 = D
for M2 in self.hidden_layer_sizes:
h = HiddenLayerBatchNorm(M1, M2, activation)
self.layers.append(h)
M1 = M2
# final layer
K = len(set(Y))
h = HiddenLayer(M1, K, T.nnet.softmax)
self.layers.append(h)
if batch_sz is None:
batch_sz = N
# collect params for later use
self.params = []
for h in self.layers:
self.params += h.params
# for momentum
dparams = [theano.shared(np.zeros_like(p.get_value())) for p in self.params]
# note! we will need to build the output differently
# for train and test (prediction)
# set up theano functions and variables
thX = T.matrix('X')
thY = T.ivector('Y')
# for training
p_y_given_x = self.forward(thX, is_training=True)
cost = -T.mean(T.log(p_y_given_x[T.arange(thY.shape[0]), thY]))
prediction = T.argmax(p_y_given_x, axis=1)
grads = T.grad(cost, self.params)
# momentum only
updates = [
(p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads)
] + [
(dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads)
]
for layer in self.layers[:-1]:
updates += layer.running_update
train_op = theano.function(
inputs=[thX, thY],
outputs=[cost, prediction],
updates=updates,
)
# for testing
test_p_y_given_x = self.forward(thX, is_training=False)
test_prediction = T.argmax(test_p_y_given_x, axis=1)
self.predict = theano.function(
inputs=[thX],
outputs=test_prediction,
)
n_batches = N // batch_sz
costs = []
for i in range(epochs):
if n_batches > 1:
X, Y = shuffle(X, Y)
for j in range(n_batches):
Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)]
Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)]
c, p = train_op(Xbatch, Ybatch)
costs.append(c)
if (j+1) % print_period == 0:
accuracy = np.mean(p == Ybatch)
print("epoch:", i, "batch:", j, "n_batches:", n_batches, "cost:", c, "accuracy:", accuracy)
print("Train acc:", self.score(X, Y), "Test acc:", self.score(Xtest, Ytest))
if show_fig:
plt.plot(costs)
plt.show()
def forward(self, X, is_training):
out = X
for h in self.layers[:-1]:
out = h.forward(out, is_training)
out = self.layers[-1].forward(out)
return out
def score(self, X, Y):
P = self.predict(X)
return np.mean(Y == P)
def main():
# step 1: get the data and define all the usual variables
Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
ann = ANN([500, 300])
ann.fit(Xtrain, Ytrain, Xtest, Ytest, show_fig=True)
print("Train accuracy:", ann.score(Xtrain, Ytrain))
print("Test accuracy:", ann.score(Xtest, Ytest))
if __name__ == '__main__':
main()
```
#### File: src/ann_class2/pytorch_dropout.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
# Note: is helpful to look at keras_example.py first
import numpy as np
import matplotlib.pyplot as plt
from util import get_normalized_data
import torch
from torch.autograd import Variable
from torch import optim
# get the data, same as Theano + Tensorflow examples
# (no fit() function here -- we write the training loop ourselves below)
Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
# get shapes
_, D = Xtrain.shape
K = len(set(Ytrain))
# Note: no need to convert Y to indicator matrix
# the model will be a sequence of layers
model = torch.nn.Sequential()
# ANN with layers [784] -> [500] -> [300] -> [10]
# NOTE: the "p" is p_drop, not p_keep
model.add_module("dropout1", torch.nn.Dropout(p=0.2))
model.add_module("dense1", torch.nn.Linear(D, 500))
model.add_module("relu1", torch.nn.ReLU())
model.add_module("dropout2", torch.nn.Dropout(p=0.5))
model.add_module("dense2", torch.nn.Linear(500, 300))
model.add_module("relu2", torch.nn.ReLU())
model.add_module("dropout3", torch.nn.Dropout(p=0.5))
model.add_module("dense3", torch.nn.Linear(300, K))
# Note: no final softmax!
# just like Tensorflow, it's included in cross-entropy function
# define a loss function
# other loss functions can be found here:
# http://pytorch.org/docs/master/nn.html#loss-functions
loss = torch.nn.CrossEntropyLoss(reduction='mean')  # size_average is deprecated
# Note: this returns a function!
# e.g. use it like: loss(logits, labels)
# define an optimizer
# other optimizers can be found here:
# http://pytorch.org/docs/master/optim.html
optimizer = optim.Adam(model.parameters(), lr=1e-4)
# define the training procedure
# i.e. one step of gradient descent
# there are lots of steps
# so we encapsulate it in a function
# Note: inputs and labels are torch tensors
def train(model, loss, optimizer, inputs, labels):
# set the model to training mode
# because dropout has 2 different modes!
model.train()
inputs = Variable(inputs, requires_grad=False)
labels = Variable(labels, requires_grad=False)
# Reset gradient
optimizer.zero_grad()
# Forward
logits = model.forward(inputs)
output = loss.forward(logits, labels)
# Backward
output.backward()
# Update parameters
optimizer.step()
# what's the difference between backward() and step()?
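  # answer: backward() computes gradients of the loss w.r.t. each parameter
  # (stored in param.grad); step() uses those gradients to update the
  # parameters according to the optimizer's rule (Adam here)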
return output.item()
# similar to train() but not doing the backprop step
def get_cost(model, loss, inputs, labels):
# set the model to testing mode
# because dropout has 2 different modes!
model.eval()
inputs = Variable(inputs, requires_grad=False)
labels = Variable(labels, requires_grad=False)
# Forward
logits = model.forward(inputs)
output = loss.forward(logits, labels)
return output.item()
# define the prediction procedure
# also encapsulate these steps
# Note: inputs is a torch tensor
def predict(model, inputs):
# set the model to testing mode
# because dropout has 2 different modes!
model.eval()
inputs = Variable(inputs, requires_grad=False)
logits = model.forward(inputs)
return logits.data.numpy().argmax(axis=1)
# return the accuracy
# labels is a torch tensor
# to get back the internal numpy data
# use the instance method .numpy()
def score(model, inputs, labels):
predictions = predict(model, inputs)
return np.mean(labels.numpy() == predictions)
### prepare for training loop ###
# convert the data arrays into torch tensors
Xtrain = torch.from_numpy(Xtrain).float()
Ytrain = torch.from_numpy(Ytrain).long()
Xtest = torch.from_numpy(Xtest).float()
Ytest = torch.from_numpy(Ytest).long()
# training parameters
epochs = 15
batch_size = 32
n_batches = Xtrain.size()[0] // batch_size
# things to keep track of
train_costs = []
test_costs = []
train_accuracies = []
test_accuracies = []
# main training loop
for i in range(epochs):
cost = 0
test_cost = 0
for j in range(n_batches):
Xbatch = Xtrain[j*batch_size:(j+1)*batch_size]
Ybatch = Ytrain[j*batch_size:(j+1)*batch_size]
cost += train(model, loss, optimizer, Xbatch, Ybatch)
# we could have also calculated the train cost here
# but I wanted to show you that we could also return it
# from the train function itself
train_acc = score(model, Xtrain, Ytrain)
test_acc = score(model, Xtest, Ytest)
test_cost = get_cost(model, loss, Xtest, Ytest)
print("Epoch: %d, cost: %f, acc: %.2f" % (i, test_cost, test_acc))
# for plotting
train_costs.append(cost / n_batches)
train_accuracies.append(train_acc)
test_costs.append(test_cost)
test_accuracies.append(test_acc)
# plot the results
plt.plot(train_costs, label='Train cost')
plt.plot(test_costs, label='Test cost')
plt.title('Cost')
plt.legend()
plt.show()
plt.plot(train_accuracies, label='Train accuracy')
plt.plot(test_accuracies, label='Test accuracy')
plt.title('Accuracy')
plt.legend()
plt.show()
```
#### File: src/ann_class2/random_search.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
# import theano.tensor as T
from theano_ann import ANN
from util import get_spiral, get_clouds
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import numpy as np
def random_search():
# get the data and split into train/test
X, Y = get_spiral()
# X, Y = get_clouds()
X, Y = shuffle(X, Y)
Ntrain = int(0.7*len(X))
Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
# starting hyperparameters
M = 20
nHidden = 2
log_lr = -4
log_l2 = -2 # since we always want it to be positive
max_tries = 30
# loop through all possible hyperparameter settings
best_validation_rate = 0
  best_M = M
  best_nHidden = nHidden
  best_lr = log_lr
  best_l2 = log_l2
for _ in range(max_tries):
model = ANN([M]*nHidden)
model.fit(
Xtrain, Ytrain,
learning_rate=10**log_lr, reg=10**log_l2,
mu=0.99, epochs=3000, show_fig=False
)
validation_accuracy = model.score(Xtest, Ytest)
train_accuracy = model.score(Xtrain, Ytrain)
print(
"validation_accuracy: %.3f, train_accuracy: %.3f, settings: %s, %s, %s" %
(validation_accuracy, train_accuracy, [M]*nHidden, log_lr, log_l2)
)
if validation_accuracy > best_validation_rate:
best_validation_rate = validation_accuracy
best_M = M
best_nHidden = nHidden
best_lr = log_lr
best_l2 = log_l2
# select new hyperparams
nHidden = best_nHidden + np.random.randint(-1, 2) # -1, 0, or 1
nHidden = max(1, nHidden)
M = best_M + np.random.randint(-1, 2)*10
M = max(10, M)
log_lr = best_lr + np.random.randint(-1, 2)
log_l2 = best_l2 + np.random.randint(-1, 2)
print("Best validation_accuracy:", best_validation_rate)
print("Best settings:")
print("best_M:", best_M)
print("best_nHidden:", best_nHidden)
print("learning_rate:", best_lr)
print("l2:", best_l2)
if __name__ == '__main__':
random_search()
```
#### File: bayesian_ml/1/nb.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import t
from sortedcontainers import SortedList
class NB:
def fit(self, X, Y):
self.pyy = []
self.tinfo = []
N, D = X.shape
for c in (0, 1):
pyy_c = (1.0 + np.sum(Y == c)) / (N + 1.0 + 1.0)
self.pyy.append(pyy_c)
# for each dimension, we need to store the data we need to calculate
# the posterior predictive distribution
# t-distribution with 3 params: df, center, scale
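      # (background, not in the original: with a Normal-Gamma prior on
      # (mean, precision), the posterior predictive for a new x is a
      # Student-t with df=2*alpha, loc=mu, scale=sqrt(beta*(k+1)/(alpha*k)),
      # where k is the precision-scaling parameter -- computed below)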
Xc = X[Y == c]
tinfo_c = []
      for d in range(D):
# first calculate the parameters of the normal gamma
xbar = Xc[:,d].mean()
mu = N*xbar / (1.0 + N)
precision = 1.0 + N
alpha = 1.0 + N/2.0
beta = 1.0 + 0.5*Xc[:,d].var()*N + 0.5*N*(xbar*xbar)/precision
tinfo_cd = {
'df': 2*alpha,
'center': mu,
'scale': np.sqrt( beta*(precision + 1)/(alpha * precision) ),
}
tinfo_c.append(tinfo_cd)
self.tinfo.append(tinfo_c)
def predict_proba(self, X):
N, D = X.shape
# P = np.zeros(N)
# for n in xrange(N):
# x = X[n]
# pyx = []
# for c in (0, 1):
# pycx = self.pyy[c]
# for d in xrange(D):
# tinfo_cd = self.tinfo[c][d]
# pdf_d = t.pdf(x[d], df=tinfo_cd['df'], loc=tinfo_cd['center'], scale=tinfo_cd['scale'])
# pycx *= pdf_d
# pyx.append(pycx)
# py1x = pyx[1] / (pyx[0] + pyx[1])
# # print "p(y=1|x):", py1x
# P[n] = py1x
posteriors = np.zeros((N, 2))
for c in (0, 1):
probability_matrix = np.zeros((N, D))
      for d in range(D):
tinfo_cd = self.tinfo[c][d]
pdf_d = t.pdf(X[:,d], df=tinfo_cd['df'], loc=tinfo_cd['center'], scale=tinfo_cd['scale'])
probability_matrix[:,d] = pdf_d
posteriors_c = np.prod(probability_matrix, axis=1)*self.pyy[c]
posteriors[:,c] = posteriors_c
P = posteriors[:,1] / np.sum(posteriors, axis=1)
return P
def predict(self, X):
return np.round(self.predict_proba(X))
def score(self, X, Y):
return np.mean(self.predict(X) == Y)
def confusion_matrix(self, X, Y):
P = self.predict(X)
M = np.zeros((2, 2))
M[0,0] = np.sum(P[Y == 0] == Y[Y == 0])
M[0,1] = np.sum(P[Y == 0] != Y[Y == 0])
M[1,0] = np.sum(P[Y == 1] != Y[Y == 1])
M[1,1] = np.sum(P[Y == 1] == Y[Y == 1])
return M
def get_3_misclassified(self, X, Y):
P = self.predict(X)
N = len(Y)
samples = np.random.choice(N, 3, replace=False, p=(P != Y)/float(np.sum(P != Y)))
return X[samples], Y[samples], P[samples]
def get_3_most_ambiguous(self, X, Y):
P = self.predict_proba(X)
N = len(X)
    sl = SortedList()  # stores (distance, sample index) tuples
    for n in range(N):
p = P[n]
dist = np.abs(p - 0.5)
if len(sl) < 3:
sl.add( (dist, n) )
else:
if dist < sl[-1][0]:
del sl[-1]
sl.add( (dist, n) )
indexes = [v for k, v in sl]
return X[indexes], Y[indexes]
def plot_image(x, Q, title):
im = Q.dot(x)
plt.imshow(im.reshape(28,28), cmap='gray')
plt.title(title)
plt.show()
if __name__ == '__main__':
  Xtrain = pd.read_csv('Xtrain.csv', header=None).to_numpy()
  Xtest = pd.read_csv('Xtest.csv', header=None).to_numpy()
  Ytrain = pd.read_csv('ytrain.csv', header=None).to_numpy().flatten()
  # print("Ytrain.shape:", Ytrain.shape)
  Ytest = pd.read_csv('ytest.csv', header=None).to_numpy().flatten()
  model = NB()
  model.fit(Xtrain, Ytrain)
  print("train accuracy:", model.score(Xtrain, Ytrain))
  print("test accuracy:", model.score(Xtest, Ytest))
  # confusion matrix
  M = model.confusion_matrix(Xtest, Ytest)
  print("confusion matrix:")
  print(M)
  print("N:", len(Ytest))
  print("sum(M):", M.sum())
# plot 3 misclassified
  Q = pd.read_csv('Q.csv', header=None).to_numpy()
misclassified, targets, predictions = model.get_3_misclassified(Xtrain, Ytrain)
for x, y, p in zip(misclassified, targets, predictions):
plot_image(x, Q, 'misclassified target=%s prediction=%s' % (y, int(p)))
# ambiguous
ambiguous, targets = model.get_3_most_ambiguous(Xtrain, Ytrain)
for x, y in zip(ambiguous, targets):
plot_image(x, Q, 'ambiguous target=%s' % y)
```
#### File: bayesian_ml/4/npbgmm.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from scipy.stats import multivariate_normal as mvn, dirichlet, wishart
from scipy.special import digamma, gamma
# scipy wishart!!!
# parameters are df, sigma=scale
# for us, df = a, inv(sigma) = B
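# marginal(x) below evaluates the prior predictive density p(x) obtained by
# integrating the Gaussian likelihood over the Normal-Wishart prior
# (a multivariate Student-t); it is used when scoring a brand-new cluster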
def marginal(x, c, m, a, B):
D = len(x)
k0 = ( c / (np.pi * (1 + c)) )**(D/2.0)
k1top = np.linalg.det(B + (c/(1+c)*np.outer(x - m, x - m)))**(-(a + 1.0)/2.0)
k1bot = np.linalg.det(B)**(-a/2.0)
k1 = k1top/k1bot
k2log = 0
  for d in range(D):
k2log += np.log(gamma( (a+1.0)/2.0 + (1.0-d)/2.0 )) - np.log(gamma( a/2.0 + (1.0-d)/2.0 ))
k2 = np.exp(k2log)
return k0*k1*k2
def normalize_phi_hat(phi_hat):
# phi_hat is a dictionary: cluster index -> non-normalized probability of that cluster
# normalization done in place so no need to return anything
  total = sum(phi_hat.values())
  for j, p_hat in phi_hat.items():
    phi_hat[j] = p_hat / total
def sample_cluster_identity(phi):
# phi is a dictionary: cluster index -> probability of that cluster
# print "dictionary sample from:", phi
p = np.random.random()
cumulative = 0
  for j, q in phi.items():
cumulative += q
if p < cumulative:
return j
# print "cumulative:", cumulative
assert(False) # should never get here because cumulative = 1 by now
def sample_from_prior(c0, m0, a0, B0):
precision0 = wishart.rvs(df=a0, scale=np.linalg.inv(B0))
cov = np.linalg.inv(precision0)
mean = mvn.rvs(mean=m0, cov=cov/c0)
return mean, cov
# samples mu, sigma from P(mu, sigma | X)
def sample_from_X(X, m0, c0, a0, B0):
N = len(X)
s = float(N)
m = (c0 / (s + c0))*m0 + (1 / (s + c0))*X.sum(axis=0)
c = s + c0
a = s + a0
meanX = X.mean(axis=0)
B = (s / (a0*s + 1)) * np.outer(meanX - m0, meanX - m0) + B0
  for i in range(N):
B += np.outer(X[i] - meanX, X[i] - meanX)
return sample_from_prior(c, m, a, B)
def gmm(X, T=500):
N, D = X.shape
m0 = X.mean(axis=0)
c0 = 0.1
a0 = float(D)
B0 = c0*D*np.cov(X.T)
alpha0 = 1.0
# cluster assignments - originally everything is assigned to cluster 0
C = np.zeros(N)
# keep as many as we need for each gaussian
# originally we sample from the prior
# TODO: just use the function above
precision0 = wishart.rvs(df=a0, scale=np.linalg.inv(B0))
covariances = [np.linalg.inv(precision0)]
means = [mvn.rvs(mean=m0, cov=covariances[0]/c0)]
cluster_counts = [1]
K = 1
observations_per_cluster = np.zeros((T, 6))
  for t in range(T):
    if t % 20 == 0:
      print(t)
# 1) calculate phi[i,j]
# Notes:
# MANY new clusters can be made each iteration
# A cluster can be DESTROYED if a x[i] is the only pt in cluster j and gets assigned to a new cluster
# phi = np.empty((N, K))
    list_of_cluster_indices = list(range(K))  # must be a list so we can append/remove indices
next_cluster_index = K
# phi = [] # TODO: do we need this at all?
    for i in range(N):
phi_i = {}
for j in list_of_cluster_indices:
# don't loop through xrange(K) because clusters can be created or destroyed as we loop through i
nj_noti = np.sum(C[:i] == j) + np.sum(C[i+1:] == j)
if nj_noti > 0:
# existing cluster
# phi[i,j] = N(x[i] | mu[j], cov[j]) * nj_noti / (alpha0 + N - 1)
# using the sampled mu / covs
phi_i[j] = mvn.pdf(X[i], mean=means[j], cov=covariances[j]) * nj_noti / (alpha0 + N - 1.0)
# new cluster
# create a possible new cluster for every sample i
# but only keep it if sample i occupies this new cluster j'
# i.e. if C[i] = j' when we sample C[i]
# phi[i,j'] = alpha0 / (alpha0 + N - 1) * p(x[i])
# p(x[i]) is a marginal integrated over mu and precision
phi_i[next_cluster_index] = alpha0 / (alpha0 + N - 1.0) * marginal(X[i], c0, m0, a0, B0)
# normalize phi[i] and assign C[i] to its new cluster by sampling from phi[i]
normalize_phi_hat(phi_i)
# if C[i] = j' (new cluster), generate mu[j'] and cov[j']
C[i] = sample_cluster_identity(phi_i)
if C[i] == next_cluster_index:
list_of_cluster_indices.append(next_cluster_index)
next_cluster_index += 1
new_mean, new_cov = sample_from_prior(c0, m0, a0, B0)
means.append(new_mean)
covariances.append(new_cov)
# destroy any cluster with no points in it
clusters_to_remove = []
tot = 0
for j in list_of_cluster_indices:
nj = np.sum(C == j)
# print "number of pts in cluster %d:" % j, nj
tot += nj
if nj == 0:
clusters_to_remove.append(j)
# print "tot:", tot
assert(tot == N)
for j in clusters_to_remove:
list_of_cluster_indices.remove(j)
# DEBUG - make sure no clusters are empty
# counts = [np.sum(C == j) for j in list_of_cluster_indices]
# for c in counts:
# assert(c > 0)
# re-order the cluster indexes so they range from 0..new K - 1
new_C = np.zeros(N)
    for new_j in range(len(list_of_cluster_indices)):
old_j = list_of_cluster_indices[new_j]
new_C[C == old_j] = new_j
C = new_C
K = len(list_of_cluster_indices)
    list_of_cluster_indices = list(range(K)) # redundant but if removed will break counts
cluster_counts.append(K)
# 2) calculate the new mu, covariance for every currently non-empty cluster
# i.e. SAMPLE mu, cov from the new cluster assignments
means = []
covariances = []
    for j in range(K):
# first calculate m', c', a', B'
# then call the function that samples a mean and covariance using these
mean, cov = sample_from_X(X[C == j], m0, c0, a0, B0)
means.append(mean)
covariances.append(cov)
# plot number of observations per cluster for 6 most probable clusters per iteration
counts = sorted([np.sum(C == j) for j in list_of_cluster_indices], reverse=True)
# print "counts:", counts
if len(counts) < 6:
observations_per_cluster[t,:len(counts)] = counts
else:
observations_per_cluster[t] = counts[:6]
# plot number of clusters per iteration
plt.plot(cluster_counts)
plt.show()
# plot number of observations per cluster for 6 most probable clusters per iteration
plt.plot(observations_per_cluster)
plt.show()
def main():
  X = pd.read_csv('data.txt', header=None).to_numpy()
gmm(X)
if __name__ == '__main__':
main()
```
#### File: src/cnn_class2/tf_resnet_first_layers.py
```python
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
# Let's go up to the end of the first conv block
# to make sure everything has been loaded correctly
# compared to keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.applications.resnet50 import ResNet50
from keras.models import Model
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
from tf_resnet_convblock import ConvLayer, BatchNormLayer, ConvBlock
# NOTE: dependent on your Keras version
# this script used 2.1.1
# [<keras.engine.topology.InputLayer at 0x112fe4358>,
# <keras.layers.convolutional.Conv2D at 0x112fe46a0>,
# <keras.layers.normalization.BatchNormalization at 0x112fe4630>,
# <keras.layers.core.Activation at 0x112fe4eb8>,
# <keras.layers.pooling.MaxPooling2D at 0x10ed4be48>,
# <keras.layers.convolutional.Conv2D at 0x1130723c8>,
# <keras.layers.normalization.BatchNormalization at 0x113064710>,
# <keras.layers.core.Activation at 0x113092dd8>,
# <keras.layers.convolutional.Conv2D at 0x11309e908>,
# <keras.layers.normalization.BatchNormalization at 0x11308a550>,
# <keras.layers.core.Activation at 0x11312ac88>,
# <keras.layers.convolutional.Conv2D at 0x1131207b8>,
# <keras.layers.convolutional.Conv2D at 0x1131b8da0>,
# <keras.layers.normalization.BatchNormalization at 0x113115550>,
# <keras.layers.normalization.BatchNormalization at 0x1131a01d0>,
# <keras.layers.merge.Add at 0x11322f0f0>,
# <keras.layers.core.Activation at 0x113246cf8>]
# define some additional layers so they have a forward function
class ReLULayer:
def forward(self, X):
return tf.nn.relu(X)
def get_params(self):
return []
class MaxPoolLayer:
def __init__(self, dim):
self.dim = dim
def forward(self, X):
return tf.nn.max_pool(
X,
ksize=[1, self.dim, self.dim, 1],
strides=[1, 2, 2, 1],
padding='VALID'
)
def get_params(self):
return []
class PartialResNet:
def __init__(self):
self.layers = [
# before conv block
ConvLayer(d=7, mi=3, mo=64, stride=2, padding='SAME'),
BatchNormLayer(64),
ReLULayer(),
MaxPoolLayer(dim=3),
# conv block
ConvBlock(mi=64, fm_sizes=[64, 64, 256], stride=1),
]
self.input_ = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
self.output = self.forward(self.input_)
def copyFromKerasLayers(self, layers):
self.layers[0].copyFromKerasLayers(layers[1])
self.layers[1].copyFromKerasLayers(layers[2])
self.layers[4].copyFromKerasLayers(layers[5:])
def forward(self, X):
for layer in self.layers:
X = layer.forward(X)
return X
def predict(self, X):
assert(self.session is not None)
return self.session.run(
self.output,
feed_dict={self.input_: X}
)
def set_session(self, session):
self.session = session
self.layers[0].session = session
self.layers[1].session = session
self.layers[4].set_session(session)
  def get_params(self):
    params = []
    for layer in self.layers:
      params += layer.get_params()
    return params  # without this return, variables_initializer() below would get None
if __name__ == '__main__':
# you can also set weights to None, it doesn't matter
resnet = ResNet50(weights='imagenet')
# you can determine the correct layer
# by looking at resnet.layers in the console
partial_model = Model(
inputs=resnet.input,
outputs=resnet.layers[16].output
)
print(partial_model.summary())
# for layer in partial_model.layers:
# layer.trainable = False
my_partial_resnet = PartialResNet()
# make a fake image
X = np.random.random((1, 224, 224, 3))
# get keras output
keras_output = partial_model.predict(X)
# get my model output
init = tf.variables_initializer(my_partial_resnet.get_params())
# note: starting a new session messes up the Keras model
session = keras.backend.get_session()
my_partial_resnet.set_session(session)
session.run(init)
# first, just make sure we can get any output
first_output = my_partial_resnet.predict(X)
print("first_output.shape:", first_output.shape)
# copy params from Keras model
my_partial_resnet.copyFromKerasLayers(partial_model.layers)
# compare the 2 models
output = my_partial_resnet.predict(X)
diff = np.abs(output - keras_output).sum()
if diff < 1e-10:
print("Everything's great!")
else:
print("diff = %s" % diff)
```
#### File: src/cnn_class/keras_example.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization, Input
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from datetime import datetime
from scipy.io import loadmat
from sklearn.utils import shuffle
from benchmark import get_data, error_rate
# helper
# def y2indicator(Y):
# N = len(Y)
# K = len(set(Y))
# I = np.zeros((N, K))
# I[np.arange(N), Y] = 1
# return I
def rearrange(X):
# input is (32, 32, 3, N)
# output is (N, 32, 32, 3)
# N = X.shape[-1]
# out = np.zeros((N, 32, 32, 3), dtype=np.float32)
# for i in xrange(N):
# for j in xrange(3):
# out[i, :, :, j] = X[:, :, j, i]
# return out / 255
return (X.transpose(3, 0, 1, 2) / 255.).astype(np.float32)
# get the data
train, test = get_data()
# Need to scale! don't leave as 0..255
# Y is a N x 1 matrix with values 1..10 (MATLAB indexes by 1)
# So flatten it and make it 0..9
# No indicator matrix needed here: sparse_categorical_crossentropy takes integer labels directly
Xtrain = rearrange(train['X'])
Ytrain = train['y'].flatten() - 1
del train
Xtest = rearrange(test['X'])
Ytest = test['y'].flatten() - 1
del test
# get shapes
K = len(set(Ytrain))
# make the CNN
i = Input(shape=Xtrain.shape[1:])
x = Conv2D(filters=20, kernel_size=(5, 5))(i)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)
x = Conv2D(filters=50, kernel_size=(5, 5))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)
x = Flatten()(x)
x = Dense(units=500)(x)
x = Activation('relu')(x)
x = Dropout(0.3)(x)
x = Dense(units=K)(x)
x = Activation('softmax')(x)
model = Model(inputs=i, outputs=x)
# list of losses: https://keras.io/losses/
# list of optimizers: https://keras.io/optimizers/
# list of metrics: https://keras.io/metrics/
model.compile(
loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
# note: multiple ways to choose a backend
# either theano, tensorflow, or cntk
# https://keras.io/backend/
# gives us back a <keras.callbacks.History object at 0x112e61a90>
r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=10, batch_size=32)
print("Returned:", r)
# print the available keys
# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc'])
print(r.history.keys())
# plot some data
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
# accuracies
plt.plot(r.history['acc'], label='acc')
plt.plot(r.history['val_acc'], label='val_acc')
plt.legend()
plt.show()
```
#### File: src/hmm_class/scan3.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
X = 2*np.random.randn(300) + np.sin(np.linspace(0, 3*np.pi, 300))
plt.plot(X)
plt.title("original")
plt.show()
decay = T.scalar('decay')
sequence = T.vector('sequence')
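# the recurrence below is a first-order low-pass filter (an exponential
# moving average): y[t] = (1-decay)*x[t] + decay*y[t-1]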
def recurrence(x, last, decay):
return (1-decay)*x + decay*last
outputs, _ = theano.scan(
fn=recurrence,
sequences=sequence,
n_steps=sequence.shape[0],
outputs_info=[np.float64(0)],
non_sequences=[decay]
)
lpf = theano.function(
inputs=[sequence, decay],
outputs=outputs,
)
Y = lpf(X, 0.99)
plt.plot(Y)
plt.title("filtered")
plt.show()
```
#### File: src/linear_regression_class/gd.py
```python
import matplotlib.pyplot as plt
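# minimal gradient descent demo on J(x1, x2) = x1^2 + x2^4:
# repeatedly step each coordinate against its partial derivative,
# x <- x - lr * dJ/dx, and record J to show the cost decreasing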
lr = 1e-2
x1 = 5
x2 = -5
def J(x1, x2):
return x1**2 + x2**4
def g1(x1):
return 2*x1
def g2(x2):
return 4*x2**3
values = []
for i in range(1000):
values.append(J(x1, x2))
x1 -= lr * g1(x1)
x2 -= lr * g2(x2)
values.append(J(x1, x2))
print(x1, x2)
plt.plot(values)
plt.show()
```
#### File: src/logistic_regression_class/logistic_donut.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import matplotlib.pyplot as plt
N = 1000
D = 2
R_inner = 5
R_outer = 10
# distance from origin is radius + random normal
# angle theta is uniformly distributed between (0, 2pi)
R1 = np.random.randn(N//2) + R_inner
theta = 2*np.pi*np.random.random(N//2)
X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T
R2 = np.random.randn(N//2) + R_outer
theta = 2*np.pi*np.random.random(N//2)
X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T
X = np.concatenate([ X_inner, X_outer ])
T = np.array([0]*(N//2) + [1]*(N//2)) # labels: first N//2 are 0, last N//2 are 1
plt.scatter(X[:,0], X[:,1], c=T)
plt.show()
# add a column of ones
# ones = np.array([[1]*N]).T # old
ones = np.ones((N, 1))
# add a column of r = sqrt(x^2 + y^2)
r = np.sqrt( (X * X).sum(axis=1) ).reshape(-1, 1)
Xb = np.concatenate((ones, r, X), axis=1)
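# the radius feature is what makes this problem solvable by a linear model:
# the two rings differ mainly in their distance from the origin, so a
# threshold on r separates them even though x, y alone cannot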
# randomly initialize the weights
w = np.random.randn(D + 2)
# calculate the model output
z = Xb.dot(w)
def sigmoid(z):
return 1/(1 + np.exp(-z))
Y = sigmoid(z)
# calculate the cross-entropy error
def cross_entropy(T, Y):
return -(T*np.log(Y) + (1-T)*np.log(1-Y)).sum()
# let's do gradient descent 100 times
learning_rate = 0.0001
error = []
for i in range(5000):
e = cross_entropy(T, Y)
error.append(e)
if i % 500 == 0:
print(e)
  # gradient descent weight update with L2 regularization
w += learning_rate * ( Xb.T.dot(T - Y) - 0.1*w )
# recalculate Y
Y = sigmoid(Xb.dot(w))
plt.plot(error)
plt.title("Cross-entropy per iteration")
plt.show()
print("Final w:", w)
print("Final classification rate:", 1 - np.abs(T - np.round(Y)).sum() / N)
```
#### File: src/nlp_class2/glove_svd.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import os
import json
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
from datetime import datetime
from sklearn.utils import shuffle
from util import find_analogies
import sys
sys.path.append(os.path.abspath('..'))
from rnn_class.util import get_wikipedia_data
from rnn_class.brown import get_sentences_with_word2idx_limit_vocab, get_sentences_with_word2idx
class Glove:
def __init__(self, D, V, context_sz):
self.D = D
self.V = V
self.context_sz = context_sz
def fit(self, sentences, cc_matrix=None):
# build co-occurrence matrix
# paper calls it X, so we will call it X, instead of calling
# the training data X
# TODO: would it be better to use a sparse matrix?
t0 = datetime.now()
V = self.V
D = self.D
if not os.path.exists(cc_matrix):
X = np.zeros((V, V))
N = len(sentences)
print("number of sentences to process:", N)
it = 0
for sentence in sentences:
it += 1
if it % 10000 == 0:
print("processed", it, "/", N)
n = len(sentence)
for i in range(n):
# i is not the word index!!!
# j is not the word index!!!
# i just points to which element of the sequence (sentence) we're looking at
wi = sentence[i]
start = max(0, i - self.context_sz)
end = min(n, i + self.context_sz)
# we can either choose only one side as context, or both
# here we are doing both
# make sure "start" and "end" tokens are part of some context
# otherwise their f(X) will be 0 (denominator in bias update)
if i - self.context_sz < 0:
points = 1.0 / (i + 1)
X[wi,0] += points
X[0,wi] += points
if i + self.context_sz > n:
points = 1.0 / (n - i)
X[wi,1] += points
X[1,wi] += points
# left side
for j in range(start, i):
wj = sentence[j]
points = 1.0 / (i - j) # this is +ve
X[wi,wj] += points
X[wj,wi] += points
# right side
for j in range(i + 1, end):
wj = sentence[j]
points = 1.0 / (j - i) # this is +ve
X[wi,wj] += points
X[wj,wi] += points
# save the cc matrix because it takes forever to create
np.save(cc_matrix, X)
else:
X = np.load(cc_matrix)
print("max in X:", X.max())
# target
logX = np.log(X + 1)
print("max in log(X):", logX.max())
print("time to build co-occurrence matrix:", (datetime.now() - t0))
# subtract global mean
mu = logX.mean()
model = TruncatedSVD(n_components=D)
Z = model.fit_transform(logX - mu)
S = np.diag(model.explained_variance_)
Sinv = np.linalg.inv(S)
self.W = Z.dot(Sinv)
self.U = model.components_.T
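    # TruncatedSVD gives (logX - mu) ~ Z . components_ = (W S) U^T,
    # so dividing Z by S recovers W, with W S U^T as the reconstruction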
# calculate cost once
delta = self.W.dot(S).dot(self.U.T) + mu - logX
cost = (delta * delta).sum()
print("svd cost:", cost)
def save(self, fn):
    # function word_analogies expects a (V,D) matrix and a (D,V) matrix
arrays = [self.W, self.U.T]
np.savez(fn, *arrays)
def main(we_file, w2i_file, use_brown=True, n_files=100):
if use_brown:
cc_matrix = "cc_matrix_brown.npy"
else:
cc_matrix = "cc_matrix_%s.npy" % n_files
# hacky way of checking if we need to re-load the raw data or not
# remember, only the co-occurrence matrix is needed for training
if os.path.exists(cc_matrix):
with open(w2i_file) as f:
word2idx = json.load(f)
sentences = [] # dummy - we won't actually use it
else:
if use_brown:
keep_words = set([
'king', 'man', 'woman',
'france', 'paris', 'london', 'rome', 'italy', 'britain', 'england',
'french', 'english', 'japan', 'japanese', 'chinese', 'italian',
'australia', 'australian', 'december', 'november', 'june',
'january', 'february', 'march', 'april', 'may', 'july', 'august',
'september', 'october',
])
sentences, word2idx = get_sentences_with_word2idx_limit_vocab(n_vocab=5000, keep_words=keep_words)
else:
sentences, word2idx = get_wikipedia_data(n_files=n_files, n_vocab=2000)
with open(w2i_file, 'w') as f:
json.dump(word2idx, f)
V = len(word2idx)
model = Glove(100, V, 10)
  # fit the model (truncated SVD on the log co-occurrence matrix)
model.fit(sentences, cc_matrix=cc_matrix)
model.save(we_file)
if __name__ == '__main__':
we = 'glove_svd_50.npz'
w2i = 'glove_word2idx_50.json'
# we = 'glove_svd_brown.npz'
# w2i = 'glove_word2idx_brown.json'
main(we, w2i, use_brown=False)
# load back embeddings
npz = np.load(we)
W1 = npz['arr_0']
W2 = npz['arr_1']
with open(w2i) as f:
word2idx = json.load(f)
idx2word = {i:w for w,i in word2idx.items()}
for concat in (True, False):
print("** concat:", concat)
if concat:
We = np.hstack([W1, W2.T])
else:
We = (W1 + W2.T) / 2
find_analogies('king', 'man', 'woman', We, word2idx, idx2word)
find_analogies('france', 'paris', 'london', We, word2idx, idx2word)
find_analogies('france', 'paris', 'rome', We, word2idx, idx2word)
find_analogies('paris', 'france', 'italy', We, word2idx, idx2word)
find_analogies('france', 'french', 'english', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'chinese', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'italian', We, word2idx, idx2word)
find_analogies('japan', 'japanese', 'australian', We, word2idx, idx2word)
find_analogies('december', 'november', 'june', We, word2idx, idx2word)
```
#### File: src/nlp_class2/pmi.py
```python
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import os, sys
import string
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz
from scipy.spatial.distance import cosine as cos_dist
from sklearn.metrics.pairwise import pairwise_distances
from glob import glob
from datetime import datetime
# input files
files = glob('../large_files/enwiki*.txt')
# unfortunately these work different ways
def remove_punctuation_2(s):
return s.translate(None, string.punctuation)
def remove_punctuation_3(s):
return s.translate(str.maketrans('','',string.punctuation))
if sys.version.startswith('2'):
remove_punctuation = remove_punctuation_2
else:
remove_punctuation = remove_punctuation_3
# max vocab size
V = 2000
# context size
context_size = 10
# word counts
all_word_counts = {}
# get the top V words
num_lines = 0
num_tokens = 0
for f in files:
for line in open(f):
# don't count headers, structured data, lists, etc...
if line and line[0] not in ('[', '*', '-', '|', '=', '{', '}'):
num_lines += 1
for word in remove_punctuation(line).lower().split():
num_tokens += 1
if word not in all_word_counts:
all_word_counts[word] = 0
all_word_counts[word] += 1
print("num_lines:", num_lines)
print("num_tokens:", num_tokens)
# words I really want to keep
keep_words = [
'king', 'man', 'queen', 'woman',
'heir', 'heiress', 'prince', 'princess',
'nephew', 'niece', 'uncle', 'aunt',
'husband', 'wife', 'brother', 'sister',
'tokyo', 'beijing', 'dallas', 'texas',
'january', 'february', 'march',
'april', 'may', 'june',
'july', 'august', 'september',
'october', 'november', 'december',
'actor', 'actress',
'rice', 'bread', 'miami', 'florida',
'walk', 'walking', 'swim', 'swimming',
]
for w in keep_words:
all_word_counts[w] = float('inf')
# sort in descending order
all_word_counts = sorted(all_word_counts.items(), key=lambda x: x[1], reverse=True)
# keep just the top V words
# save a slot for <UNK>
V = min(V, len(all_word_counts))
top_words = [w for w, count in all_word_counts[:V-1]] + ['<UNK>']
# TODO: try it without UNK at all
# reverse the array to get word2idx mapping
word2idx = {w:i for i, w in enumerate(top_words)}
unk = word2idx['<UNK>']
# for w in ('king', 'man', 'queen', 'woman', 'france', 'paris', \
# 'london', 'england', 'italy', 'rome', \
# 'france', 'french', 'english', 'england', \
# 'japan', 'japanese', 'chinese', 'china', \
# 'italian', 'australia', 'australian' \
# 'japan', 'tokyo', 'china', 'beijing'):
# assert(w in word2idx)
if not os.path.exists('pmi_counts_%s.npz' % V):
# init counts
wc_counts = lil_matrix((V, V))
### make PMI matrix
# add counts
k = 0
# for line in open('../large_files/text8'):
for f in files:
for line in open(f):
# don't count headers, structured data, lists, etc...
if line and line[0] not in ('[', '*', '-', '|', '=', '{', '}'):
line_as_idx = []
for word in remove_punctuation(line).lower().split():
if word in word2idx:
idx = word2idx[word]
# line_as_idx.append(idx)
else:
idx = unk
# pass
line_as_idx.append(idx)
for i, w in enumerate(line_as_idx):
# keep count
k += 1
if k % 10000 == 0:
print("%s/%s" % (k, num_tokens))
start = max(0, i - context_size)
end = min(len(line_as_idx), i + context_size)
for c in line_as_idx[start:i]:
wc_counts[w, c] += 1
for c in line_as_idx[i+1:end]:
wc_counts[w, c] += 1
print("Finished counting")
save_npz('pmi_counts_%s.npz' % V, csr_matrix(wc_counts))
else:
wc_counts = load_npz('pmi_counts_%s.npz' % V)
# context counts get raised ^ 0.75
c_counts = wc_counts.sum(axis=0).A.flatten() ** 0.75
c_probs = c_counts / c_counts.sum()
c_probs = c_probs.reshape(1, V)
# PMI(w, c) = #(w, c) / #(w) / p(c)
# pmi = wc_counts / wc_counts.sum(axis=1) / c_probs # works only if numpy arrays
pmi = wc_counts.multiply(1.0 / wc_counts.sum(axis=1) / c_probs).tocsr()
# this operation changes it to a coo_matrix
# which doesn't have functions we need, e.g log1p()
# so convert it back to a csr
print("type(pmi):", type(pmi))
logX = pmi.log1p() # would be logX = np.log(pmi.A + 1) in numpy
print("type(logX):", type(logX))
logX[logX < 0] = 0
### do alternating least squares
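# objective: || W U^T + b 1^T + 1 c^T + mu - logX ||^2 + reg*(||W||^2 + ||U||^2)
# each factor has a closed-form solution when the others are held fixed,
# so we cycle through W, b, U, c -- the vectorized solves below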
# latent dimension
D = 100
reg = 0.1
# initialize weights
W = np.random.randn(V, D) / np.sqrt(V + D)
b = np.zeros(V)
U = np.random.randn(V, D) / np.sqrt(V + D)
c = np.zeros(V)
mu = logX.mean()
costs = []
t0 = datetime.now()
for epoch in range(10):
print("epoch:", epoch)
delta = W.dot(U.T) + b.reshape(V, 1) + c.reshape(1, V) + mu - logX
# cost = ( delta * delta ).sum()
cost = np.multiply(delta, delta).sum()
# * behaves differently if delta is a "matrix" object vs "array" object
costs.append(cost)
### partially vectorized updates ###
# update W
# matrix = reg*np.eye(D) + U.T.dot(U)
# for i in range(V):
# vector = (logX[i,:] - b[i] - c - mu).dot(U)
# W[i] = np.linalg.solve(matrix, vector)
# # update b
# for i in range(V):
# numerator = (logX[i,:] - W[i].dot(U.T) - c - mu).sum()
# b[i] = numerator / V #/ (1 + reg)
# # update U
# matrix = reg*np.eye(D) + W.T.dot(W)
# for j in range(V):
# vector = (logX[:,j] - b - c[j] - mu).dot(W)
# U[j] = np.linalg.solve(matrix, vector)
# # update c
# for j in range(V):
# numerator = (logX[:,j] - W.dot(U[j]) - b - mu).sum()
# c[j] = numerator / V #/ (1 + reg)
### vectorized updates ###
# vectorized update W
matrix = reg*np.eye(D) + U.T.dot(U)
vector = (logX - b.reshape(V, 1) - c.reshape(1, V) - mu).dot(U).T
W = np.linalg.solve(matrix, vector).T
# vectorized update b
b = (logX - W.dot(U.T) - c.reshape(1, V) - mu).sum(axis=1) / V
# vectorized update U
matrix = reg*np.eye(D) + W.T.dot(W)
vector = (logX - b.reshape(V, 1) - c.reshape(1, V) - mu).T.dot(W).T
U = np.linalg.solve(matrix, vector).T
# vectorized update c
c = (logX - W.dot(U.T) - b.reshape(V, 1) - mu).sum(axis=0) / V
print("train duration:", datetime.now() - t0)
plt.plot(costs)
plt.show()
### test it
king = W[word2idx['king']]
man = W[word2idx['man']]
queen = W[word2idx['queen']]
woman = W[word2idx['woman']]
vec = king - man + woman
# find closest
# closest = None
# min_dist = float('inf')
# for i in range(len(W)):
# dist = cos_dist(W[i], vec)
# if dist < min_dist:
# closest = i
# min_dist = dist
# set word embedding matrix
# W = (W + U) / 2
distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V)
idx = distances.argsort()[:10]
print("closest 10:")
for i in idx:
print(top_words[i], distances[i])
print("dist to queen:", cos_dist(W[word2idx['queen']], vec))
def analogy(pos1, neg1, pos2, neg2):
# don't actually use pos2 in calculation, just print what's expected
print("testing: %s - %s = %s - %s" % (pos1, neg1, pos2, neg2))
for w in (pos1, neg1, pos2, neg2):
if w not in word2idx:
print("Sorry, %s not in word2idx" % w)
return
p1 = W[word2idx[pos1]]
n1 = W[word2idx[neg1]]
p2 = W[word2idx[pos2]]
n2 = W[word2idx[neg2]]
vec = p1 - n1 + n2
distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V)
idx = distances.argsort()[:10]
# pick the best that's not p1, n1, or n2
best_idx = -1
keep_out = [word2idx[w] for w in (pos1, neg1, neg2)]
for i in idx:
if i not in keep_out:
best_idx = i
break
print("got: %s - %s = %s - %s" % (pos1, neg1, top_words[best_idx], neg2))
print("closest 10:")
for i in idx:
print(top_words[i], distances[i])
print("dist to %s:" % pos2, cos_dist(p2, vec))
analogy('king', 'man', 'queen', 'woman')
analogy('miami', 'florida', 'dallas', 'texas')
# analogy('einstein', 'scientist', 'picasso', 'painter')
analogy('china', 'rice', 'england', 'bread')
analogy('man', 'woman', 'he', 'she')
analogy('man', 'woman', 'uncle', 'aunt')
analogy('man', 'woman', 'brother', 'sister')
analogy('man', 'woman', 'husband', 'wife')
analogy('man', 'woman', 'actor', 'actress')
analogy('man', 'woman', 'father', 'mother')
analogy('heir', 'heiress', 'prince', 'princess')
analogy('nephew', 'niece', 'uncle', 'aunt')
analogy('france', 'paris', 'japan', 'tokyo')
analogy('france', 'paris', 'china', 'beijing')
analogy('february', 'january', 'december', 'november')
analogy('france', 'paris', 'italy', 'rome')
analogy('paris', 'france', 'rome', 'italy')
analogy('france', 'french', 'england', 'english')
analogy('japan', 'japanese', 'china', 'chinese')
analogy('japan', 'japanese', 'italy', 'italian')
analogy('japan', 'japanese', 'australia', 'australian')
analogy('walk', 'walking', 'swim', 'swimming')
```
#### File: src/nlp_class2/pretrained_glove.py
```python
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
# WHERE TO GET THE VECTORS:
# GloVe: https://nlp.stanford.edu/projects/glove/
# Direct link: http://nlp.stanford.edu/data/glove.6B.zip
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
def dist1(a, b):
return np.linalg.norm(a - b)
def dist2(a, b):
return 1 - a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))
# pick a distance type
dist, metric = dist2, 'cosine'
# dist, metric = dist1, 'euclidean'
## more intuitive
# def find_analogies(w1, w2, w3):
# for w in (w1, w2, w3):
# if w not in word2vec:
# print("%s not in dictionary" % w)
# return
# king = word2vec[w1]
# man = word2vec[w2]
# woman = word2vec[w3]
# v0 = king - man + woman
# min_dist = float('inf')
# best_word = ''
# for word, v1 in iteritems(word2vec):
# if word not in (w1, w2, w3):
# d = dist(v0, v1)
# if d < min_dist:
# min_dist = d
# best_word = word
# print(w1, "-", w2, "=", best_word, "-", w3)
## faster
def find_analogies(w1, w2, w3):
for w in (w1, w2, w3):
if w not in word2vec:
print("%s not in dictionary" % w)
return
king = word2vec[w1]
man = word2vec[w2]
woman = word2vec[w3]
v0 = king - man + woman
distances = pairwise_distances(v0.reshape(1, D), embedding, metric=metric).reshape(V)
idxs = distances.argsort()[:4]
for idx in idxs:
word = idx2word[idx]
if word not in (w1, w2, w3):
best_word = word
break
print(w1, "-", w2, "=", best_word, "-", w3)
def nearest_neighbors(w, n=5):
if w not in word2vec:
print("%s not in dictionary:" % w)
return
v = word2vec[w]
distances = pairwise_distances(v.reshape(1, D), embedding, metric=metric).reshape(V)
idxs = distances.argsort()[1:n+1]
print("neighbors of: %s" % w)
for idx in idxs:
print("\t%s" % idx2word[idx])
# load in pre-trained word vectors
print('Loading word vectors...')
word2vec = {}
embedding = []
idx2word = []
with open('../large_files/glove.6B/glove.6B.50d.txt', encoding='utf-8') as f:
# is just a space-separated text file in the format:
# word vec[0] vec[1] vec[2] ...
for line in f:
values = line.split()
word = values[0]
vec = np.asarray(values[1:], dtype='float32')
word2vec[word] = vec
embedding.append(vec)
idx2word.append(word)
print('Found %s word vectors.' % len(word2vec))
embedding = np.array(embedding)
V, D = embedding.shape
find_analogies('king', 'man', 'woman')
find_analogies('france', 'paris', 'london')
find_analogies('france', 'paris', 'rome')
find_analogies('paris', 'france', 'italy')
find_analogies('france', 'french', 'english')
find_analogies('japan', 'japanese', 'chinese')
find_analogies('japan', 'japanese', 'italian')
find_analogies('japan', 'japanese', 'australian')
find_analogies('december', 'november', 'june')
find_analogies('miami', 'florida', 'texas')
find_analogies('einstein', 'scientist', 'painter')
find_analogies('china', 'rice', 'bread')
find_analogies('man', 'woman', 'she')
find_analogies('man', 'woman', 'aunt')
find_analogies('man', 'woman', 'sister')
find_analogies('man', 'woman', 'wife')
find_analogies('man', 'woman', 'actress')
find_analogies('man', 'woman', 'mother')
find_analogies('heir', 'heiress', 'princess')
find_analogies('nephew', 'niece', 'aunt')
find_analogies('france', 'paris', 'tokyo')
find_analogies('france', 'paris', 'beijing')
find_analogies('february', 'january', 'november')
find_analogies('france', 'paris', 'rome')
find_analogies('paris', 'france', 'italy')
nearest_neighbors('king')
nearest_neighbors('france')
nearest_neighbors('japan')
nearest_neighbors('einstein')
nearest_neighbors('woman')
nearest_neighbors('nephew')
nearest_neighbors('february')
nearest_neighbors('rome')
```
#### File: src/nlp_class2/pretrained_w2v.py
```python
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
from gensim.models import KeyedVectors
# warning: takes quite awhile
# https://code.google.com/archive/p/word2vec/
# direct link: https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing
# 3 million words and phrases
# D = 300
word_vectors = KeyedVectors.load_word2vec_format(
'../large_files/GoogleNews-vectors-negative300.bin',
binary=True
)
# convenience
# result looks like:
# [('athens', 0.6001024842262268),
# ('albert', 0.5729557275772095),
# ('holmes', 0.569324254989624),
# ('donnie', 0.5690680742263794),
# ('italy', 0.5673537254333496),
# ('toni', 0.5666348338127136),
# ('spain', 0.5661854147911072),
# ('jh', 0.5661597847938538),
# ('pablo', 0.5631559491157532),
# ('malta', 0.5620371103286743)]
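# most_similar does the usual analogy arithmetic: it finds words whose
# vectors are closest (by cosine similarity) to w1 - w2 + w3, excluding
# the input words themselves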
def find_analogies(w1, w2, w3):
r = word_vectors.most_similar(positive=[w1, w3], negative=[w2])
print("%s - %s = %s - %s" % (w1, w2, r[0][0], w3))
def nearest_neighbors(w):
r = word_vectors.most_similar(positive=[w])
print("neighbors of: %s" % w)
for word, score in r:
print("\t%s" % word)
find_analogies('king', 'man', 'woman')
find_analogies('france', 'paris', 'london')
find_analogies('france', 'paris', 'rome')
find_analogies('paris', 'france', 'italy')
find_analogies('france', 'french', 'english')
find_analogies('japan', 'japanese', 'chinese')
find_analogies('japan', 'japanese', 'italian')
find_analogies('japan', 'japanese', 'australian')
find_analogies('december', 'november', 'june')
find_analogies('miami', 'florida', 'texas')
find_analogies('einstein', 'scientist', 'painter')
find_analogies('china', 'rice', 'bread')
find_analogies('man', 'woman', 'she')
find_analogies('man', 'woman', 'aunt')
find_analogies('man', 'woman', 'sister')
find_analogies('man', 'woman', 'wife')
find_analogies('man', 'woman', 'actress')
find_analogies('man', 'woman', 'mother')
find_analogies('heir', 'heiress', 'princess')
find_analogies('nephew', 'niece', 'aunt')
find_analogies('france', 'paris', 'tokyo')
find_analogies('france', 'paris', 'beijing')
find_analogies('february', 'january', 'november')
find_analogies('france', 'paris', 'rome')
find_analogies('paris', 'france', 'italy')
nearest_neighbors('king')
nearest_neighbors('france')
nearest_neighbors('japan')
nearest_neighbors('einstein')
nearest_neighbors('woman')
nearest_neighbors('nephew')
nearest_neighbors('february')
nearest_neighbors('rome')
```
#### File: src/nlp_class2/recursive_tensorflow.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import sys
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from datetime import datetime
from util import init_weight, get_ptb_data, display_tree
def get_labels(tree):
# must be returned in the same order as tree logits are returned
# post-order traversal
if tree is None:
return []
return get_labels(tree.left) + get_labels(tree.right) + [tree.label]
class TNN:
def __init__(self, V, D, K, activation):
self.D = D
self.f = activation
# word embedding
We = init_weight(V, D)
# linear terms
W1 = init_weight(D, D)
W2 = init_weight(D, D)
# bias
bh = np.zeros(D)
# output layer
Wo = init_weight(D, K)
bo = np.zeros(K)
# make them tensorflow variables
self.We = tf.Variable(We.astype(np.float32))
self.W1 = tf.Variable(W1.astype(np.float32))
self.W2 = tf.Variable(W2.astype(np.float32))
self.bh = tf.Variable(bh.astype(np.float32))
self.Wo = tf.Variable(Wo.astype(np.float32))
self.bo = tf.Variable(bo.astype(np.float32))
self.params = [self.We, self.W1, self.W2, self.Wo]
def fit(self, trees, lr=1e-1, mu=0.9, reg=0.1, epochs=5):
train_ops = []
costs = []
predictions = []
all_labels = []
i = 0
N = len(trees)
print("Compiling ops")
for t in trees:
i += 1
sys.stdout.write("%d/%d\r" % (i, N))
sys.stdout.flush()
logits = self.get_output(t)
labels = get_labels(t)
all_labels.append(labels)
cost = self.get_cost(logits, labels, reg)
costs.append(cost)
prediction = tf.argmax(logits, 1)
predictions.append(prediction)
train_op = tf.train.MomentumOptimizer(lr, mu).minimize(cost)
train_ops.append(train_op)
# save for later so we don't have to recompile
self.predictions = predictions
self.all_labels = all_labels
self.saver = tf.train.Saver()
init = tf.global_variables_initializer()
actual_costs = []
per_epoch_costs = []
correct_rates = []
with tf.Session() as session:
session.run(init)
for i in range(epochs):
t0 = datetime.now()
train_ops, costs, predictions, all_labels = shuffle(train_ops, costs, predictions, all_labels)
epoch_cost = 0
n_correct = 0
n_total = 0
j = 0
N = len(train_ops)
for train_op, cost, prediction, labels in zip(train_ops, costs, predictions, all_labels):
_, c, p = session.run([train_op, cost, prediction])
epoch_cost += c
actual_costs.append(c)
n_correct += np.sum(p == labels)
n_total += len(labels)
j += 1
if j % 10 == 0:
sys.stdout.write("j: %d, N: %d, c: %f\r" % (j, N, c))
sys.stdout.flush()
print(
"epoch:", i, "cost:", epoch_cost,
"elapsed time:", (datetime.now() - t0)
)
per_epoch_costs.append(epoch_cost)
correct_rates.append(n_correct / float(n_total))
self.saver.save(session, "recursive.ckpt")
plt.plot(actual_costs)
plt.title("cost per train_op call")
plt.show()
plt.plot(per_epoch_costs)
plt.title("per epoch costs")
plt.show()
plt.plot(correct_rates)
plt.title("correct rates")
plt.show()
def get_cost(self, logits, labels, reg):
cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=labels
)
)
rcost = sum(tf.nn.l2_loss(p) for p in self.params)
cost += reg*rcost
return cost
# list_of_logits is an output!
# it is added to using post-order traversal
def get_output_recursive(self, tree, list_of_logits, is_root=True):
if tree.word is not None:
# this is a leaf node
x = tf.nn.embedding_lookup(self.We, [tree.word])
else:
# this node has children
x1 = self.get_output_recursive(tree.left, list_of_logits, is_root=False)
x2 = self.get_output_recursive(tree.right, list_of_logits, is_root=False)
x = self.f(
tf.matmul(x1, self.W1) +
tf.matmul(x2, self.W2) +
self.bh)
logits = tf.matmul(x, self.Wo) + self.bo
list_of_logits.append(logits)
return x
def get_output(self, tree):
logits = []
# try:
self.get_output_recursive(tree, logits)
# except Exception as e:
# display_tree(tree)
# raise e
return tf.concat(logits, 0)
def score(self, trees):
if trees is None:
predictions = self.predictions
all_labels = self.all_labels
else:
# just build and run the predict_op for each tree
# and accumulate the total
predictions = []
all_labels = []
i = 0
N = len(trees)
print("Compiling ops")
for t in trees:
i += 1
sys.stdout.write("%d/%d\r" % (i, N))
sys.stdout.flush()
logits = self.get_output(t)
labels = get_labels(t)
all_labels.append(labels)
prediction = tf.argmax(logits, 1)
predictions.append(prediction)
n_correct = 0
n_total = 0
with tf.Session() as session:
self.saver.restore(session, "recursive.ckpt")
for prediction, y in zip(predictions, all_labels):
p = session.run(prediction)
n_correct += (p[-1] == y[-1]) # we only care about the root
n_total += 1 # one prediction per tree (the root)
return float(n_correct) / n_total
def main():
train, test, word2idx = get_ptb_data()
train = train[:100]
test = test[:100]
V = len(word2idx)
D = 80
K = 5
model = TNN(V, D, K, tf.nn.relu)
model.fit(train)
print("train accuracy:", model.score(None))
print("test accuracy:", model.score(test))
if __name__ == '__main__':
main()
```
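A key invariant in the file above is that `get_labels` and `get_output_recursive` both visit nodes in post-order (left, right, node), so the i-th label lines up with the i-th row of logits. A toy check of that ordering (the `Node` class below is a stand-in for the tree objects returned by `get_ptb_data`):
```python
# stand-in for the parse-tree nodes used above
class Node:
    def __init__(self, label, word=None, left=None, right=None):
        self.label, self.word = label, word
        self.left, self.right = left, right

def post_order_labels(tree):
    if tree is None:
        return []
    return post_order_labels(tree.left) + post_order_labels(tree.right) + [tree.label]

#       root(label=2)
#      /            \
# leaf(label=0)  leaf(label=1)
root = Node(2, left=Node(0, word=5), right=Node(1, word=7))
print(post_order_labels(root))  # [0, 1, 2] -- same order the logits are appended
```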
#### File: src/nlp_class2/rntn_theano.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import sys
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
from sklearn.utils import shuffle
from util import init_weight, get_ptb_data, display_tree
from datetime import datetime
from sklearn.metrics import f1_score
# helper for adam optimizer
# use tensorflow defaults
# def adam(cost, params, lr0=1e-4, beta1=0.9, beta2=0.999, eps=1e-8):
# grads = T.grad(cost, params)
# updates = []
# time = theano.shared(0)
# new_time = time + 1
# updates.append((time, new_time))
# lr = lr0*T.sqrt(1 - beta2**new_time) / (1 - beta1**new_time)
# for p, g in zip(params, grads):
# m = theano.shared(p.get_value() * 0.)
# v = theano.shared(p.get_value() * 0.)
# new_m = beta1*m + (1 - beta1)*g
# new_v = beta2*v + (1 - beta2)*g*g
# new_p = p - lr*new_m / (T.sqrt(new_v) + eps)
# updates.append((m, new_m))
# updates.append((v, new_v))
# updates.append((p, new_p))
# return updates
# def momentum_updates(cost, params, learning_rate=1e-3, mu=0.99):
# # momentum changes
# dparams = [theano.shared(p.get_value() * 0.) for p in params]
# updates = []
# grads = T.grad(cost, params)
# for p, dp, g in zip(params, dparams, grads):
# dp_update = mu*dp - learning_rate*g
# p_update = p + dp_update
# updates.append((dp, dp_update))
# updates.append((p, p_update))
# return updates
# def rmsprop(cost, params, lr=1e-3, decay=0.999, eps=1e-10):
# grads = T.grad(cost, params)
# caches = [theano.shared(np.ones_like(p.get_value())) for p in params]
# new_caches = [decay*c + (1. - decay)*g*g for c, g in zip(caches, grads)]
# c_update = [(c, new_c) for c, new_c in zip(caches, new_caches)]
# g_update = [
# (p, p - lr*g / T.sqrt(new_c + eps)) for p, new_c, g in zip(params, new_caches, grads)
# ]
# updates = c_update + g_update
# return updates
def adagrad(cost, params, lr, eps=1e-10):
grads = T.grad(cost, params)
caches = [theano.shared(np.ones_like(p.get_value())) for p in params]
new_caches = [c + g*g for c, g in zip(caches, grads)]
c_update = [(c, new_c) for c, new_c in zip(caches, new_caches)]
g_update = [
(p, p - lr*g / T.sqrt(new_c + eps)) for p, new_c, g in zip(params, new_caches, grads)
]
updates = c_update + g_update
return updates
class RecursiveNN:
def __init__(self, V, D, K, activation=T.tanh):
self.V = V
self.D = D
self.K = K
self.f = activation
def fit(self, trees, test_trees, reg=1e-3, epochs=8, train_inner_nodes=False):
D = self.D
V = self.V
K = self.K
N = len(trees)
We = init_weight(V, D)
W11 = np.random.randn(D, D, D) / np.sqrt(3*D)
W22 = np.random.randn(D, D, D) / np.sqrt(3*D)
W12 = np.random.randn(D, D, D) / np.sqrt(3*D)
W1 = init_weight(D, D)
W2 = init_weight(D, D)
bh = np.zeros(D)
Wo = init_weight(D, K)
bo = np.zeros(K)
self.We = theano.shared(We)
self.W11 = theano.shared(W11)
self.W22 = theano.shared(W22)
self.W12 = theano.shared(W12)
self.W1 = theano.shared(W1)
self.W2 = theano.shared(W2)
self.bh = theano.shared(bh)
self.Wo = theano.shared(Wo)
self.bo = theano.shared(bo)
self.params = [self.We, self.W11, self.W22, self.W12, self.W1, self.W2, self.bh, self.Wo, self.bo]
lr = T.scalar('learning_rate')
words = T.ivector('words')
left_children = T.ivector('left_children')
right_children = T.ivector('right_children')
labels = T.ivector('labels')
def recurrence(n, hiddens, words, left, right):
w = words[n]
# any non-word will have index -1
hiddens = T.switch(
T.ge(w, 0),
T.set_subtensor(hiddens[n], self.We[w]),
T.set_subtensor(hiddens[n],
self.f(
hiddens[left[n]].dot(self.W11).dot(hiddens[left[n]]) +
hiddens[right[n]].dot(self.W22).dot(hiddens[right[n]]) +
hiddens[left[n]].dot(self.W12).dot(hiddens[right[n]]) +
hiddens[left[n]].dot(self.W1) +
hiddens[right[n]].dot(self.W2) +
self.bh
)
)
)
return hiddens
hiddens = T.zeros((words.shape[0], D))
h, _ = theano.scan(
fn=recurrence,
outputs_info=[hiddens],
n_steps=words.shape[0],
sequences=T.arange(words.shape[0]),
non_sequences=[words, left_children, right_children],
)
py_x = T.nnet.softmax(h[-1].dot(self.Wo) + self.bo)
prediction = T.argmax(py_x, axis=1)
rcost = reg*T.sum([(p*p).sum() for p in self.params])
if train_inner_nodes:
relevant_labels = labels[labels >= 0]
cost = -T.mean(T.log(py_x[labels >= 0, relevant_labels])) + rcost
else:
cost = -T.mean(T.log(py_x[-1, labels[-1]])) + rcost
updates = adagrad(cost, self.params, lr)
self.cost_predict_op = theano.function(
inputs=[words, left_children, right_children, labels],
outputs=[cost, prediction],
allow_input_downcast=True,
)
self.train_op = theano.function(
inputs=[words, left_children, right_children, labels, lr],
outputs=[cost, prediction],
updates=updates
)
lr_ = 8e-3 # initial learning rate
costs = []
sequence_indexes = list(range(N)) # a list, so sklearn's shuffle can index it in Python 3
# if train_inner_nodes:
# n_total = sum(len(words) for words, _, _, _ in trees)
# else:
# n_total = N
for i in range(epochs):
t0 = datetime.now()
sequence_indexes = shuffle(sequence_indexes)
n_correct = 0
n_total = 0
cost = 0
it = 0
for j in sequence_indexes:
words, left, right, lab = trees[j]
c, p = self.train_op(words, left, right, lab, lr_)
if np.isnan(c):
print("Cost is nan! Let's stop here. \
Why don't you try decreasing the learning rate?")
for p in self.params:
print(p.get_value().sum())
exit()
cost += c
n_correct += (p[-1] == lab[-1])
n_total += 1
it += 1
if it % 10 == 0:
sys.stdout.write(
"j/N: %d/%d correct rate so far: %f, cost so far: %f\r" %
(it, N, float(n_correct)/n_total, cost)
)
sys.stdout.flush()
# calculate the test score
n_test_correct = 0
n_test_total = 0
for words, left, right, lab in test_trees:
_, p = self.cost_predict_op(words, left, right, lab)
n_test_correct += (p[-1] == lab[-1])
n_test_total += 1
print(
"i:", i, "cost:", cost,
"train acc:", float(n_correct)/n_total,
"test acc:", float(n_test_correct)/n_test_total,
"time for epoch:", (datetime.now() - t0)
)
costs.append(cost)
plt.plot(costs)
plt.show()
def score(self, trees):
n_total = len(trees)
n_correct = 0
for words, left, right, lab in trees:
_, p = self.cost_predict_op(words, left, right, lab)
n_correct += (p[-1] == lab[-1])
return float(n_correct) / n_total
def f1_score(self, trees):
Y = []
P = []
for words, left, right, lab in trees:
_, p = self.cost_predict_op(words, left, right, lab)
Y.append(lab[-1])
P.append(p[-1])
return f1_score(Y, P, average=None).mean()
def add_idx_to_tree(tree, current_idx):
# post-order labeling of tree nodes
if tree is None:
return current_idx
current_idx = add_idx_to_tree(tree.left, current_idx)
current_idx = add_idx_to_tree(tree.right, current_idx)
tree.idx = current_idx
current_idx += 1
return current_idx
def tree2list(tree, parent_idx, is_binary=False):
if tree is None:
return [], [], [], []
words_left, left_child_left, right_child_left, labels_left = tree2list(tree.left, tree.idx, is_binary)
words_right, left_child_right, right_child_right, labels_right = tree2list(tree.right, tree.idx, is_binary)
if tree.word is None:
w = -1
left = tree.left.idx
right = tree.right.idx
else:
w = tree.word
left = -1
right = -1
words = words_left + words_right + [w]
left_child = left_child_left + left_child_right + [left]
right_child = right_child_left + right_child_right + [right]
if is_binary:
if tree.label > 2:
label = 1
elif tree.label < 2:
label = 0
else:
label = -1 # we will eventually filter these out
else:
label = tree.label
labels = labels_left + labels_right + [label]
return words, left_child, right_child, labels
def main(is_binary=True):
train, test, word2idx = get_ptb_data()
for t in train:
add_idx_to_tree(t, 0)
train = [tree2list(t, -1, is_binary) for t in train]
if is_binary:
train = [t for t in train if t[3][-1] >= 0] # for filtering binary labels
for t in test:
add_idx_to_tree(t, 0)
test = [tree2list(t, -1, is_binary) for t in test]
if is_binary:
test = [t for t in test if t[3][-1] >= 0] # for filtering binary labels
# check imbalance
# pos = 0
# neg = 0
# mid = 0
# label_counts = np.zeros(5)
# for t in train + test:
# words, left_child, right_child, labels = t
# # for l in labels:
# # if l == 0:
# # neg += 1
# # elif l == 1:
# # pos += 1
# # else:
# # mid += 1
# for l in labels:
# label_counts[l] += 1
# # print("pos / total:", float(pos) / (pos + neg + mid))
# # print("mid / total:", float(mid) / (pos + neg + mid))
# # print("neg / total:", float(neg) / (pos + neg + mid))
# print("label proportions:", label_counts / label_counts.sum())
# exit()
train = shuffle(train)
# train = train[:5000]
# n_pos = sum(t[3][-1] for t in train)
# print("n_pos train:", n_pos)
test = shuffle(test)
smalltest = test[:1000]
# n_pos = sum(t[3][-1] for t in test)
# print("n_pos test:", n_pos)
V = len(word2idx)
print("vocab size:", V)
D = 20
K = 2 if is_binary else 5
model = RecursiveNN(V, D, K)
model.fit(train, smalltest, epochs=20, train_inner_nodes=True)
print("train accuracy:", model.score(train))
print("test accuracy:", model.score(test))
print("train f1:", model.f1_score(train))
print("test f1:", model.f1_score(test))
if __name__ == '__main__':
main()
```
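The `adagrad` helper above is the standard accumulated-squared-gradient rule; a numpy sketch of the same update (note the cache starts at ones rather than zeros, mirroring the `theano.shared(np.ones_like(...))` initialization above):
```python
import numpy as np

def adagrad_step(p, g, cache, lr=8e-3, eps=1e-10):
    # accumulate squared gradients, then take a per-parameter scaled step
    cache = cache + g * g
    p = p - lr * g / np.sqrt(cache + eps)
    return p, cache

p = np.array([1.0, -2.0])
cache = np.ones_like(p)  # matches the Theano initialization above
for _ in range(3):
    g = 2 * p            # gradient of sum(p**2)
    p, cache = adagrad_step(p, g, cache)
print(p)                 # moves toward [0, 0] with per-coordinate step sizes
```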
#### File: numpy_class/exercises/ex5.py
```python
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import matplotlib.pyplot as plt
def is_symmetric1(A):
# non-square arrays cannot be symmetric (and A == A.T would not broadcast)
return A.shape[0] == A.shape[1] and np.all(A == A.T)
def is_symmetric2(A):
rows, cols = A.shape
if rows != cols:
return False
for i in range(rows):
for j in range(cols):
if A[i,j] != A[j,i]:
return False
return True
def check(A, b):
print("Testing:", A)
assert(is_symmetric1(A) == b)
assert(is_symmetric2(A) == b)
# test the functions
A = np.zeros((3, 3))
check(A, True)
A = np.eye(3)
check(A, True)
A = np.random.randn(3, 2)
A = A.dot(A.T)
check(A, True)
A = np.array([[1, 2, 3], [2, 4, 5], [3, 5, 6]])
check(A, True)
A = np.random.randn(3, 2)
check(A, False)
A = np.random.randn(3, 3)
check(A, False)
A = np.arange(9).reshape(3, 3)
check(A, False)
```
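Both checks above use exact equality, which is fine for the test matrices here but brittle for float matrices produced by longer computations. A tolerance-based variant is a small extension (a sketch, not part of the original exercise):
```python
import numpy as np

def is_symmetric3(A, tol=1e-8):
    # like is_symmetric1, but robust to floating-point round-off
    return A.shape[0] == A.shape[1] and np.allclose(A, A.T, atol=tol)

A = np.random.randn(100, 100)
A = (A + A.T) / 2          # symmetrize
assert is_symmetric3(A)
```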
#### File: src/recommenders/preprocess2sparse.py
```python
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from scipy.sparse import lil_matrix, csr_matrix, save_npz, load_npz
# load in the data
df = pd.read_csv('../large_files/movielens-20m-dataset/edited_rating.csv')
# df = pd.read_csv('../large_files/movielens-20m-dataset/small_rating.csv')
N = df.userId.max() + 1 # number of users
M = df.movie_idx.max() + 1 # number of movies
# split into train and test
df = shuffle(df)
cutoff = int(0.8*len(df))
df_train = df.iloc[:cutoff]
df_test = df.iloc[cutoff:]
A = lil_matrix((N, M))
print("Calling: update_train")
count = 0
def update_train(row):
global count
count += 1
if count % 100000 == 0:
print("processed: %.3f" % (float(count)/cutoff))
i = int(row.userId)
j = int(row.movie_idx)
A[i,j] = row.rating
df_train.apply(update_train, axis=1)
# mask, to tell us which entries exist and which do not
A = A.tocsr()
mask = (A > 0)
save_npz("Atrain.npz", A)
# test ratings dictionary
A_test = lil_matrix((N, M))
print("Calling: update_test")
count = 0
def update_test(row):
global count
count += 1
if count % 100000 == 0:
print("processed: %.3f" % (float(count)/len(df_test)))
i = int(row.userId)
j = int(row.movie_idx)
A_test[i,j] = row.rating
df_test.apply(update_test, axis=1)
A_test = A_test.tocsr()
mask_test = (A_test > 0)
save_npz("Atest.npz", A_test)
```
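The saved matrices can be read back with `load_npz`, and the masks are cheap to rebuild rather than store. A short sketch, assuming the script above has already produced the two `.npz` files:
```python
from scipy.sparse import load_npz

A = load_npz("Atrain.npz")        # csr_matrix of shape (N, M)
A_test = load_npz("Atest.npz")
mask = (A > 0)                    # same mask construction as above
print(A.shape, A.nnz, "nonzero train ratings")
```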
#### File: rl2/atari/dqn_theano.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import copy
import gym
import os
import sys
import random
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from scipy.misc import imresize
##### testing only
# MAX_EXPERIENCES = 10000
# MIN_EXPERIENCES = 1000
MAX_EXPERIENCES = 500000
MIN_EXPERIENCES = 50000
TARGET_UPDATE_PERIOD = 10000
IM_SIZE = 84
K = 4 #env.action_space.n
def rgb2gray(rgb):
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray.astype(np.uint8)
# TODO: can this be converted into a Theano function?
def downsample_image(A):
B = A[34:194] # select the important parts of the image
B = rgb2gray(B) # convert to grayscale
# downsample image
# changing aspect ratio doesn't significantly distort the image
# nearest neighbor interpolation produces a much sharper image
# than default bilinear
B = imresize(B, size=(IM_SIZE, IM_SIZE), interp='nearest')
return B
def update_state(state, obs):
obs_small = downsample_image(obs)
return np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
class ReplayMemory:
def __init__(self, size=MAX_EXPERIENCES, frame_height=IM_SIZE, frame_width=IM_SIZE,
agent_history_length=4, batch_size=32):
"""
Args:
size: Integer, Number of stored transitions
frame_height: Integer, Height of a frame of an Atari game
frame_width: Integer, Width of a frame of an Atari game
agent_history_length: Integer, Number of frames stacked together to create a state
batch_size: Integer, Number of transitions returned in a minibatch
"""
self.size = size
self.frame_height = frame_height
self.frame_width = frame_width
self.agent_history_length = agent_history_length
self.batch_size = batch_size
self.count = 0
self.current = 0
# Pre-allocate memory
self.actions = np.empty(self.size, dtype=np.int32)
self.rewards = np.empty(self.size, dtype=np.float32)
self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8)
self.terminal_flags = np.empty(self.size, dtype=np.bool)
# Pre-allocate memory for the states and new_states in a minibatch
self.states = np.empty((self.batch_size, self.agent_history_length,
self.frame_height, self.frame_width), dtype=np.uint8)
self.new_states = np.empty((self.batch_size, self.agent_history_length,
self.frame_height, self.frame_width), dtype=np.uint8)
self.indices = np.empty(self.batch_size, dtype=np.int32)
def add_experience(self, action, frame, reward, terminal):
"""
Args:
action: An integer-encoded action
frame: One grayscale frame of the game
reward: reward the agent received for performing an action
terminal: A bool stating whether the episode terminated
"""
if frame.shape != (self.frame_height, self.frame_width):
raise ValueError('Dimension of frame is wrong!')
self.actions[self.current] = action
self.frames[self.current, ...] = frame
self.rewards[self.current] = reward
self.terminal_flags[self.current] = terminal
self.count = max(self.count, self.current+1)
self.current = (self.current + 1) % self.size
def _get_state(self, index):
if self.count == 0:
raise ValueError("The replay memory is empty!")
if index < self.agent_history_length - 1:
raise ValueError("Index must be min 3")
return self.frames[index-self.agent_history_length+1:index+1, ...]
def _get_valid_indices(self):
for i in range(self.batch_size):
while True:
index = random.randint(self.agent_history_length, self.count - 1)
if index < self.agent_history_length:
continue
if index >= self.current and index - self.agent_history_length <= self.current:
continue
if self.terminal_flags[index - self.agent_history_length:index].any():
continue
break
self.indices[i] = index
def get_minibatch(self):
"""
Returns a minibatch of self.batch_size transitions
"""
if self.count < self.agent_history_length:
raise ValueError('Not enough memories to get a minibatch')
self._get_valid_indices()
for i, idx in enumerate(self.indices):
self.states[i] = self._get_state(idx - 1)
self.new_states[i] = self._get_state(idx)
return self.states, self.actions[self.indices], self.rewards[self.indices], self.new_states, self.terminal_flags[self.indices]
def init_filter(shape):
w = np.random.randn(*shape) * np.sqrt(2.0 / np.prod(shape[1:]))
return w.astype(np.float32)
def adam(cost, params, lr0=1e-5, beta1=0.9, beta2=0.999, eps=1e-8):
# cast
lr0 = np.float32(lr0)
beta1 = np.float32(beta1)
beta2 = np.float32(beta2)
eps = np.float32(eps)
one = np.float32(1)
zero = np.float32(0)
grads = T.grad(cost, params)
updates = []
time = theano.shared(zero)
new_time = time + one
updates.append((time, new_time))
lr = lr0*T.sqrt(one - beta2**new_time) / (one - beta1**new_time)
for p, g in zip(params, grads):
m = theano.shared(p.get_value() * zero)
v = theano.shared(p.get_value() * zero)
new_m = beta1*m + (one - beta1)*g
new_v = beta2*v + (one - beta2)*g*g
new_p = p - lr*new_m / (T.sqrt(new_v) + eps)
updates.append((m, new_m))
updates.append((v, new_v))
updates.append((p, new_p))
return updates
class ConvLayer(object):
def __init__(self, mi, mo, filtsz=5, stride=2, f=T.nnet.relu):
# mi = input feature map size
# mo = output feature map size
sz = (mo, mi, filtsz, filtsz)
W0 = init_filter(sz)
self.W = theano.shared(W0)
b0 = np.zeros(mo, dtype=np.float32)
self.b = theano.shared(b0)
self.stride = (stride, stride)
self.params = [self.W, self.b]
self.f = f
# self.cut = cut
def forward(self, X):
conv_out = conv2d(
input=X,
filters=self.W,
subsample=self.stride,
# border_mode='half',
border_mode='valid',
)
# cut off 1 pixel from each edge
# to make the output the same size as input
# like tensorflow
# if self.cut:
# conv_out = conv_out[:, : ,:self.cut ,:self.cut]
return self.f(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
class HiddenLayer:
def __init__(self, M1, M2, f=T.nnet.relu):
W = np.random.randn(M1, M2) * np.sqrt(2 / M1)
self.W = theano.shared(W.astype(np.float32))
self.b = theano.shared(np.zeros(M2).astype(np.float32))
self.params = [self.W, self.b]
self.f = f
def forward(self, X):
a = X.dot(self.W) + self.b
return self.f(a)
class DQN:
def __init__(self, K, conv_layer_sizes, hidden_layer_sizes):
self.K = K
# inputs and targets
X = T.ftensor4('X')
G = T.fvector('G')
actions = T.ivector('actions')
# create the graph
self.conv_layers = []
num_input_filters = 4 # number of filters / color channels
current_size = IM_SIZE
for num_output_filters, filtersz, stride in conv_layer_sizes:
### not using this currently, it didn't make a difference ###
# cut = None
# if filtersz % 2 == 0: # if even
# cut = (current_size + stride - 1) // stride
layer = ConvLayer(num_input_filters, num_output_filters, filtersz, stride)
current_size = (current_size + stride - 1) // stride
# print("current_size:", current_size)
self.conv_layers.append(layer)
num_input_filters = num_output_filters
# get conv output size
Z = X / 255.0
for layer in self.conv_layers:
Z = layer.forward(Z)
conv_out = Z.flatten(ndim=2)
conv_out_op = theano.function(inputs=[X], outputs=conv_out, allow_input_downcast=True)
test = conv_out_op(np.random.randn(1, 4, IM_SIZE, IM_SIZE))
flattened_output_size = test.shape[1]
# build fully connected layers
self.layers = []
M1 = flattened_output_size
print("flattened_output_size:", flattened_output_size)
for M2 in hidden_layer_sizes:
layer = HiddenLayer(M1, M2)
self.layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, K, lambda x: x)
self.layers.append(layer)
# collect params for copy
self.params = []
for layer in (self.conv_layers + self.layers):
self.params += layer.params
# calculate final output and cost
Z = conv_out
for layer in self.layers:
Z = layer.forward(Z)
Y_hat = Z
selected_action_values = Y_hat[T.arange(actions.shape[0]), actions]
cost = T.mean((G - selected_action_values)**2)
# create train function
updates = adam(cost, self.params)
# compile functions
self.train_op = theano.function(
inputs=[X, G, actions],
outputs=cost,
updates=updates,
allow_input_downcast=True
)
self.predict_op = theano.function(
inputs=[X],
outputs=Y_hat,
allow_input_downcast=True
)
def copy_from(self, other):
my_params = self.params
other_params = other.params
for p, q in zip(my_params, other_params):
actual = q.get_value()
p.set_value(actual)
def predict(self, X):
return self.predict_op(X)
def update(self, states, actions, targets):
return self.train_op(states, targets, actions)
def sample_action(self, x, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
return np.argmax(self.predict([x])[0])
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
# Sample experiences
states, actions, rewards, next_states, dones = experience_replay_buffer.get_minibatch()
# Calculate targets
next_Qs = target_model.predict(next_states)
next_Q = np.amax(next_Qs, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
# Update model
loss = model.update(states, actions, targets)
return loss
def play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_size,
epsilon,
epsilon_change,
epsilon_min):
t0 = datetime.now()
# Reset the environment
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
loss = None
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done = False
while not done:
# Update target network
if total_t % TARGET_UPDATE_PERIOD == 0:
target_model.copy_from(model)
print("Copied model parameters to target network. total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD))
# Take action
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = downsample_image(obs)
next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
episode_reward += reward
# Save the latest experience
experience_replay_buffer.add_experience(action, obs_small, reward, done)
# Train the model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
dt = datetime.now() - t0_2
total_time_training += dt.total_seconds()
num_steps_in_episode += 1
state = next_state
total_t += 1
epsilon = max(epsilon - epsilon_change, epsilon_min)
return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
def smooth(x):
# last 100
n = len(x)
y = np.zeros(n)
for i in range(n):
start = max(0, i - 99)
y[i] = float(x[start:(i+1)].sum()) / (i - start + 1)
return y
if __name__ == '__main__':
# hyperparams and initialize stuff
conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
hidden_layer_sizes = [512]
gamma = 0.99
batch_sz = 32
num_episodes = 5000
total_t = 0
experience_replay_buffer = ReplayMemory()
episode_rewards = np.zeros(num_episodes)
step_counts = np.zeros(num_episodes)
# epsilon
# decays linearly until 0.1
epsilon = 1.0
epsilon_min = 0.1
epsilon_change = (epsilon - epsilon_min) / 500000
# Create environment
env = gym.envs.make("Breakout-v0")
# Create models
model = DQN(
K=K,
conv_layer_sizes=conv_layer_sizes,
hidden_layer_sizes=hidden_layer_sizes,
)
target_model = DQN(
K=K,
conv_layer_sizes=conv_layer_sizes,
hidden_layer_sizes=hidden_layer_sizes,
)
print("Populating experience replay buffer...")
obs = env.reset()
obs_small = downsample_image(obs)
for i in range(MIN_EXPERIENCES):
action = np.random.choice(K)
obs, reward, done, _ = env.step(action)
obs_small = downsample_image(obs)
experience_replay_buffer.add_experience(action, obs_small, reward, done)
if done:
obs = env.reset()
# Play a number of episodes and learn!
t0 = datetime.now()
for i in range(num_episodes):
total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_sz,
epsilon,
epsilon_change,
epsilon_min,
)
episode_rewards[i] = episode_reward
step_counts[i] = num_steps_in_episode
last_100_avg = episode_rewards[max(0, i - 100):i + 1].mean()
last_100_avg_steps = step_counts[max(0, i - 100):i + 1].mean()
print("Episode:", i,
"Duration:", duration,
"Num steps:", num_steps_in_episode,
"Reward:", episode_reward,
"Training time per step:", "%.3f" % time_per_step,
"Avg Reward (Last 100):", "%.3f" % last_100_avg,
"Avg Steps (Last 100):", "%.1f" % last_100_avg_steps,
"Epsilon:", "%.3f" % epsilon
)
sys.stdout.flush()
print("Total duration:", datetime.now() - t0)
# Plot the smoothed returns
y = smooth(episode_rewards)
plt.plot(episode_rewards, label='orig')
plt.plot(y, label='smoothed')
plt.legend()
plt.show()
```
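The state fed to the network is a rolling stack of the last four preprocessed frames; `update_state` drops the oldest frame and appends the newest. The shape bookkeeping is easy to verify in isolation:
```python
import numpy as np

IM_SIZE = 84
state = np.zeros((4, IM_SIZE, IM_SIZE), dtype=np.uint8)   # 4 stacked frames
new_frame = np.ones((IM_SIZE, IM_SIZE), dtype=np.uint8)

# drop the oldest frame, append the newest -- same as update_state above
next_state = np.append(state[1:], np.expand_dims(new_frame, 0), axis=0)
print(next_state.shape)      # (4, 84, 84)
print(next_state[-1].max())  # 1 -- the newest frame sits last
```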
#### File: rl2/cartpole/q_learning_bins.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# if builtins is not defined
# sudo pip install -U future
import gym
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
# turns list of integers into an int
# Ex.
# build_state([1,2,3,4,5]) -> 12345
def build_state(features):
return int("".join(map(lambda feature: str(int(feature)), features)))
def to_bin(value, bins):
return np.digitize(x=[value], bins=bins)[0]
class FeatureTransformer:
def __init__(self):
# Note: to make this better you could look at how often each bin was
# actually used while running the script.
# It's not clear from the high/low values nor sample() what values
# we really expect to get.
self.cart_position_bins = np.linspace(-2.4, 2.4, 9)
self.cart_velocity_bins = np.linspace(-2, 2, 9) # (-inf, inf) (I did not check that these were good values)
self.pole_angle_bins = np.linspace(-0.4, 0.4, 9)
self.pole_velocity_bins = np.linspace(-3.5, 3.5, 9) # (-inf, inf) (I did not check that these were good values)
def transform(self, observation):
# returns an int
cart_pos, cart_vel, pole_angle, pole_vel = observation
return build_state([
to_bin(cart_pos, self.cart_position_bins),
to_bin(cart_vel, self.cart_velocity_bins),
to_bin(pole_angle, self.pole_angle_bins),
to_bin(pole_vel, self.pole_velocity_bins),
])
class Model:
def __init__(self, env, feature_transformer):
self.env = env
self.feature_transformer = feature_transformer
num_states = 10**env.observation_space.shape[0]
num_actions = env.action_space.n
self.Q = np.random.uniform(low=-1, high=1, size=(num_states, num_actions))
def predict(self, s):
x = self.feature_transformer.transform(s)
return self.Q[x]
def update(self, s, a, G):
x = self.feature_transformer.transform(s)
self.Q[x,a] += 1e-2*(G - self.Q[x,a])
def sample_action(self, s, eps):
if np.random.random() < eps:
return self.env.action_space.sample()
else:
p = self.predict(s)
return np.argmax(p)
def play_one(model, eps, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 10000:
action = model.sample_action(observation, eps)
prev_observation = observation
observation, reward, done, info = env.step(action)
totalreward += reward
if done and iters < 199:
reward = -300
# update the model
G = reward + gamma*np.max(model.predict(observation))
model.update(prev_observation, action, G)
iters += 1
return totalreward
def plot_running_avg(totalrewards):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.title("Running Average")
plt.show()
if __name__ == '__main__':
env = gym.make('CartPole-v0')
ft = FeatureTransformer()
model = Model(env, ft)
gamma = 0.9
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 10000
totalrewards = np.empty(N)
for n in range(N):
eps = 1.0/np.sqrt(n+1)
totalreward = play_one(model, eps, gamma)
totalrewards[n] = totalreward
if n % 100 == 0:
print("episode:", n, "total reward:", totalreward, "eps:", eps)
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
```
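`build_state` packs the four bin indices into one integer that indexes the tabular Q. Since `np.digitize` with 9 bin edges yields indices 0 through 9, each feature contributes exactly one digit. A quick check of the encoding (`build_state` is redefined below so the snippet stands alone):
```python
import numpy as np

def build_state(features):
    return int("".join(str(int(f)) for f in features))

bins = np.linspace(-2.4, 2.4, 9)            # the cart-position bins above
print(np.digitize(x=[0.0], bins=bins)[0])   # 5 -> the middle bin
print(build_state([5, 4, 5, 4]))            # 5454: one digit per feature
```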
#### File: src/rl3/gym_review.py
```python
import gym
import numpy as np
import matplotlib.pyplot as plt
def get_action(s, w):
return 1 if s.dot(w) > 0 else 0
def play_one_episode(env, params):
observation = env.reset()
done = False
t = 0
r = 0
while not done and t < 10000:
t += 1
action = get_action(observation, params)
observation, reward, done, info = env.step(action)
r += reward
return r
def play_multiple_episodes(env, T, params):
episode_rewards = np.empty(T)
for i in range(T):
episode_rewards[i] = play_one_episode(env, params)
avg_reward = episode_rewards.mean()
print("avg reward:", avg_reward)
return avg_reward
def random_search(env):
episode_rewards = []
best = 0
params = None
for t in range(100):
new_params = np.random.random(4)*2 - 1
avg_reward = play_multiple_episodes(env, 100, new_params)
episode_rewards.append(avg_reward)
if avg_reward > best:
params = new_params
best = avg_reward
return episode_rewards, params
if __name__ == '__main__':
env = gym.make('CartPole-v0')
episode_rewards, params = random_search(env)
plt.plot(episode_rewards)
plt.show()
# play a final set of episodes
print("***Final run with final weights***")
play_multiple_episodes(env, 100, params)
```
#### File: src/rnn_class/poetry_classifier.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from util import init_weight, get_poetry_classifier_data
class SimpleRNN:
def __init__(self, M, V):
self.M = M # hidden layer size
self.V = V # vocabulary size
def fit(self, X, Y, learning_rate=1.0, mu=0.99, reg=1.0, activation=T.tanh, epochs=500, show_fig=False):
M = self.M
V = self.V
K = len(set(Y))
print("V:", V)
X, Y = shuffle(X, Y)
Nvalid = 10
Xvalid, Yvalid = X[-Nvalid:], Y[-Nvalid:]
X, Y = X[:-Nvalid], Y[:-Nvalid]
N = len(X)
# initial weights
Wx = init_weight(V, M)
Wh = init_weight(M, M)
bh = np.zeros(M)
h0 = np.zeros(M)
Wo = init_weight(M, K)
bo = np.zeros(K)
thX, thY, py_x, prediction = self.set(Wx, Wh, bh, h0, Wo, bo, activation)
cost = -T.mean(T.log(py_x[thY]))
grads = T.grad(cost, self.params)
dparams = [theano.shared(p.get_value()*0) for p in self.params]
lr = T.scalar('learning_rate')
updates = [
(p, p + mu*dp - lr*g) for p, dp, g in zip(self.params, dparams, grads)
] + [
(dp, mu*dp - lr*g) for dp, g in zip(dparams, grads)
]
self.train_op = theano.function(
inputs=[thX, thY, lr],
outputs=[cost, prediction],
updates=updates,
allow_input_downcast=True,
)
costs = []
for i in range(epochs):
X, Y = shuffle(X, Y)
n_correct = 0
cost = 0
for j in range(N):
# we set 0 to start and 1 to end
# print "X[%d]:" % j, X[j], "len:", len(X[j])
c, p = self.train_op(X[j], Y[j], learning_rate)
# print "p:", p, "y:", Y[j]
cost += c
if p == Y[j]:
n_correct += 1
# update the learning rate
learning_rate *= 0.9999
# calculate validation accuracy
n_correct_valid = 0
for j in range(Nvalid):
p = self.predict_op(Xvalid[j])
if p == Yvalid[j]:
n_correct_valid += 1
print("i:", i, "cost:", cost, "correct rate:", (float(n_correct)/N), end=" ")
print("validation correct rate:", (float(n_correct_valid)/Nvalid))
costs.append(cost)
if show_fig:
plt.plot(costs)
plt.show()
def save(self, filename):
np.savez(filename, *[p.get_value() for p in self.params])
@staticmethod
def load(filename, activation):
# TODO: would prefer to save activation to file too
npz = np.load(filename)
Wx = npz['arr_0']
Wh = npz['arr_1']
bh = npz['arr_2']
h0 = npz['arr_3']
Wo = npz['arr_4']
bo = npz['arr_5']
V, M = Wx.shape
rnn = SimpleRNN(M, V)
rnn.set(Wx, Wh, bh, h0, Wo, bo, activation)
return rnn
def set(self, Wx, Wh, bh, h0, Wo, bo, activation):
self.f = activation
# redundant - see how you can improve it
self.Wx = theano.shared(Wx)
self.Wh = theano.shared(Wh)
self.bh = theano.shared(bh)
self.h0 = theano.shared(h0)
self.Wo = theano.shared(Wo)
self.bo = theano.shared(bo)
self.params = [self.Wx, self.Wh, self.bh, self.h0, self.Wo, self.bo]
thX = T.ivector('X')
thY = T.iscalar('Y')
def recurrence(x_t, h_t1):
# returns h(t), y(t)
h_t = self.f(self.Wx[x_t] + h_t1.dot(self.Wh) + self.bh)
y_t = T.nnet.softmax(h_t.dot(self.Wo) + self.bo)
return h_t, y_t
[h, y], _ = theano.scan(
fn=recurrence,
outputs_info=[self.h0, None],
sequences=thX,
n_steps=thX.shape[0],
)
py_x = y[-1, 0, :] # only interested in the final classification of the sequence
prediction = T.argmax(py_x)
self.predict_op = theano.function(
inputs=[thX],
outputs=prediction,
allow_input_downcast=True,
)
return thX, thY, py_x, prediction
def train_poetry():
X, Y, V = get_poetry_classifier_data(samples_per_class=500)
rnn = SimpleRNN(30, V)
rnn.fit(X, Y, learning_rate=1e-6, show_fig=True, activation=T.nnet.relu, epochs=1000)
if __name__ == '__main__':
train_poetry()
```
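`save` stores the shared variables positionally (`arr_0`, `arr_1`, ...), which is why `load` has to unpack them in exactly the same order. The round trip is plain numpy:
```python
import numpy as np

Wx, Wh = np.random.randn(10, 4), np.random.randn(4, 4)
np.savez("weights_demo.npz", *[Wx, Wh])   # stored as arr_0, arr_1

npz = np.load("weights_demo.npz")
assert np.allclose(npz["arr_0"], Wx)
assert np.allclose(npz["arr_1"], Wh)
```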
#### File: src/unsupervised_class2/rbm_tf.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from util import getKaggleMNIST
from autoencoder_tf import DNN
class RBM(object):
def __init__(self, D, M, an_id):
self.D = D
self.M = M
self.id = an_id
self.build(D, M)
def set_session(self, session):
self.session = session
def build(self, D, M):
# params
self.W = tf.Variable(tf.random_normal(shape=(D, M)) * np.sqrt(2.0 / M))
# note: without limiting variance, you get numerical stability issues
self.c = tf.Variable(np.zeros(M).astype(np.float32))
self.b = tf.Variable(np.zeros(D).astype(np.float32))
# data
self.X_in = tf.placeholder(tf.float32, shape=(None, D))
# conditional probabilities
# NOTE: tf.contrib.distributions.Bernoulli API has changed in Tensorflow v1.2
V = self.X_in
p_h_given_v = tf.nn.sigmoid(tf.matmul(V, self.W) + self.c)
self.p_h_given_v = p_h_given_v # save for later
# self.rng_h_given_v = tf.contrib.distributions.Bernoulli(
# probs=p_h_given_v,
# dtype=tf.float32
# )
r = tf.random_uniform(shape=tf.shape(p_h_given_v))
H = tf.to_float(r < p_h_given_v)
p_v_given_h = tf.nn.sigmoid(tf.matmul(H, tf.transpose(self.W)) + self.b)
# self.rng_v_given_h = tf.contrib.distributions.Bernoulli(
# probs=p_v_given_h,
# dtype=tf.float32
# )
r = tf.random_uniform(shape=tf.shape(p_v_given_h))
X_sample = tf.to_float(r < p_v_given_h)
# build the objective
objective = tf.reduce_mean(self.free_energy(self.X_in)) - tf.reduce_mean(self.free_energy(X_sample))
self.train_op = tf.train.AdamOptimizer(1e-2).minimize(objective)
# self.train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(objective)
# build the cost
# we won't use this to optimize the model parameters
# just to observe what happens during training
logits = self.forward_logits(self.X_in)
self.cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=self.X_in,
logits=logits,
)
)
def fit(self, X, epochs=1, batch_sz=100, show_fig=False):
N, D = X.shape
n_batches = N // batch_sz
costs = []
print("training rbm: %s" % self.id)
for i in range(epochs):
print("epoch:", i)
X = shuffle(X)
for j in range(n_batches):
batch = X[j*batch_sz:(j*batch_sz + batch_sz)]
_, c = self.session.run((self.train_op, self.cost), feed_dict={self.X_in: batch})
if j % 10 == 0:
print("j / n_batches:", j, "/", n_batches, "cost:", c)
costs.append(c)
if show_fig:
plt.plot(costs)
plt.show()
def free_energy(self, V):
b = tf.reshape(self.b, (self.D, 1))
first_term = -tf.matmul(V, b)
first_term = tf.reshape(first_term, (-1,))
second_term = -tf.reduce_sum(
# tf.log(1 + tf.exp(tf.matmul(V, self.W) + self.c)),
tf.nn.softplus(tf.matmul(V, self.W) + self.c),
axis=1
)
return first_term + second_term
def forward_hidden(self, X):
return tf.nn.sigmoid(tf.matmul(X, self.W) + self.c)
def forward_logits(self, X):
Z = self.forward_hidden(X)
return tf.matmul(Z, tf.transpose(self.W)) + self.b
def forward_output(self, X):
return tf.nn.sigmoid(self.forward_logits(X))
def transform(self, X):
# accepts and returns a real numpy array
# unlike forward_hidden and forward_output
# which deal with tensorflow variables
return self.session.run(self.p_h_given_v, feed_dict={self.X_in: X})
def main():
Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
# same as autoencoder_tf.py
Xtrain = Xtrain.astype(np.float32)
Xtest = Xtest.astype(np.float32)
_, D = Xtrain.shape
K = len(set(Ytrain))
dnn = DNN(D, [1000, 750, 500], K, UnsupervisedModel=RBM)
init_op = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init_op)
dnn.set_session(session)
dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=True, epochs=10)
if __name__ == '__main__':
main()
```
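The `free_energy` method implements F(v) = -v·b - Σ_j softplus((vW + c)_j), the quantity whose difference between data and samples drives the training objective above. A numpy sketch of the same computation for one visible vector:
```python
import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

def free_energy(v, W, b, c):
    # F(v) = -v.b - sum_j softplus((v.W + c)_j), as in the class above
    return -v.dot(b) - softplus(v.dot(W) + c).sum()

D, M = 6, 3
rng = np.random.RandomState(0)
v = (rng.rand(D) > 0.5).astype(np.float64)   # a binary visible vector
print(free_energy(v, rng.randn(D, M), rng.randn(D), rng.randn(M)))
```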
#### File: src/unsupervised_class2/xwing.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from util import relu, error_rate, getKaggleMNIST, init_weights
from autoencoder import momentum_updates
# new additions used to compare purity measure using GMM
import os
import sys
sys.path.append(os.path.abspath('..'))
from unsupervised_class.kmeans_mnist import purity
from sklearn.mixture import GaussianMixture
class Layer(object):
def __init__(self, m1, m2):
W = init_weights((m1, m2))
bi = np.zeros(m2, dtype=np.float32)
bo = np.zeros(m1, dtype=np.float32)
self.W = theano.shared(W)
self.bi = theano.shared(bi)
self.bo = theano.shared(bo)
self.params = [self.W, self.bi, self.bo]
def forward(self, X):
return T.nnet.sigmoid(X.dot(self.W) + self.bi)
def forwardT(self, X):
return T.nnet.sigmoid(X.dot(self.W.T) + self.bo)
class DeepAutoEncoder(object):
def __init__(self, hidden_layer_sizes):
self.hidden_layer_sizes = hidden_layer_sizes
def fit(self, X, learning_rate=0.5, mu=0.99, epochs=50, batch_sz=100, show_fig=False):
# cast hyperparams
learning_rate = np.float32(learning_rate)
mu = np.float32(mu)
N, D = X.shape
n_batches = N // batch_sz
mi = D
self.layers = []
self.params = []
for mo in self.hidden_layer_sizes:
layer = Layer(mi, mo)
self.layers.append(layer)
self.params += layer.params
mi = mo
X_in = T.matrix('X')
X_hat = self.forward(X_in)
cost = -(X_in * T.log(X_hat) + (1 - X_in) * T.log(1 - X_hat)).mean()
cost_op = theano.function(
inputs=[X_in],
outputs=cost,
)
updates = momentum_updates(cost, self.params, mu, learning_rate)
train_op = theano.function(
inputs=[X_in],
outputs=cost,
updates=updates,
)
costs = []
for i in range(epochs):
print("epoch:", i)
X = shuffle(X)
for j in range(n_batches):
batch = X[j*batch_sz:(j*batch_sz + batch_sz)]
c = train_op(batch)
if j % 100 == 0:
print("j / n_batches:", j, "/", n_batches, "cost:", c)
costs.append(c)
if show_fig:
plt.plot(costs)
plt.show()
def forward(self, X):
Z = X
for layer in self.layers:
Z = layer.forward(Z)
self.map2center = theano.function(
inputs=[X],
outputs=Z,
)
for i in range(len(self.layers)-1, -1, -1):
Z = self.layers[i].forwardT(Z)
return Z
def main():
Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
dae = DeepAutoEncoder([500, 300, 2])
dae.fit(Xtrain)
mapping = dae.map2center(Xtrain)
plt.scatter(mapping[:,0], mapping[:,1], c=Ytrain, s=100, alpha=0.5)
plt.show()
# purity measure from unsupervised machine learning pt 1
# NOTE: this will take a long time (i.e. just leave it overnight)
gmm = GaussianMixture(n_components=10)
gmm.fit(Xtrain)
print("Finished GMM training")
responsibilities_full = gmm.predict_proba(Xtrain)
print("full purity:", purity(Ytrain, responsibilities_full))
gmm.fit(mapping)
responsibilities_reduced = gmm.predict_proba(mapping)
print("reduced purity:", purity(Ytrain, responsibilities_reduced))
if __name__ == '__main__':
main()
```
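The `Layer` class above ties encoder and decoder weights: `forward` multiplies by `W` while `forwardT` reuses `W.T` with its own bias, halving the parameter count. One tied encode/decode pass in numpy:
```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

D, M = 8, 3
rng = np.random.RandomState(0)
W, bi, bo = rng.randn(D, M), np.zeros(M), np.zeros(D)

x = rng.rand(D)
z = sigmoid(x.dot(W) + bi)        # encode, as in Layer.forward
x_hat = sigmoid(z.dot(W.T) + bo)  # decode with the transposed weights
print(z.shape, x_hat.shape)       # (3,) (8,)
```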
#### File: src/unsupervised_class3/vae_tf.py
```python
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import util
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
st = None
try:
st = tf.contrib.bayesflow.stochastic_tensor
except:
# doesn't exist in later versions of TF
# we will use the reparameterization trick instead
# watch the later lecture on the reparameterization trick
# to learn about it.
pass
Normal = tf.contrib.distributions.Normal
Bernoulli = tf.contrib.distributions.Bernoulli
class DenseLayer(object):
def __init__(self, M1, M2, f=tf.nn.relu):
# self.M1 = M1
# self.M2 = M2
self.W = tf.Variable(tf.random_normal(shape=(M1, M2)) * 2 / np.sqrt(M1))
self.b = tf.Variable(np.zeros(M2).astype(np.float32))
self.f = f
def forward(self, X):
return self.f(tf.matmul(X, self.W) + self.b)
class VariationalAutoencoder:
def __init__(self, D, hidden_layer_sizes):
# hidden_layer_sizes specifies the size of every layer
# in the encoder
# up to the final hidden layer Z
# the decoder will have the reverse shape
# represents a batch of training data
self.X = tf.placeholder(tf.float32, shape=(None, D))
# encoder
self.encoder_layers = []
M_in = D
for M_out in hidden_layer_sizes[:-1]:
h = DenseLayer(M_in, M_out)
self.encoder_layers.append(h)
M_in = M_out
# for convenience, we'll refer to the final encoder size as M
# also the input to the decoder size
M = hidden_layer_sizes[-1]
# the encoder's final layer output is unbounded
# so there is no activation function
# we also need 2 times as many units as specified by M_out
# since there needs to be M_out means + M_out variances
h = DenseLayer(M_in, 2 * M, f=lambda x: x)
self.encoder_layers.append(h)
# get the mean and variance / std dev of Z.
# note that the variance must be > 0
# we can get a sigma (standard dev) > 0 from an unbounded variable by
# passing it through the softplus function.
# add a small amount for smoothing.
current_layer_value = self.X
for layer in self.encoder_layers:
current_layer_value = layer.forward(current_layer_value)
self.means = current_layer_value[:, :M]
self.stddev = tf.nn.softplus(current_layer_value[:, M:]) + 1e-6
# get a sample of Z
# we need to use a stochastic tensor
# in order for the errors to be backpropagated past this point
if st is None:
# doesn't exist in later versions of Tensorflow
# we'll use the same trick we use in Theano
standard_normal = Normal(
loc=np.zeros(M, dtype=np.float32),
scale=np.ones(M, dtype=np.float32)
)
e = standard_normal.sample(tf.shape(self.means)[0])
self.Z = e * self.stddev + self.means
# note: this also works because Tensorflow
# now does the "magic" for you
# n = Normal(
# loc=self.means,
# scale=self.stddev,
# )
# self.Z = n.sample()
else:
with st.value_type(st.SampleValue()):
self.Z = st.StochasticTensor(Normal(loc=self.means, scale=self.stddev))
# to get back Q(Z), the distribution of Z
# we will later use self.Z.distribution
# decoder
self.decoder_layers = []
M_in = M
for M_out in reversed(hidden_layer_sizes[:-1]):
h = DenseLayer(M_in, M_out)
self.decoder_layers.append(h)
M_in = M_out
# the decoder's final layer should technically go through a sigmoid
# so that the final output is a binary probability (e.g. Bernoulli)
# but Bernoulli accepts logits (pre-sigmoid) so we will take those
# so no activation function is needed at the final layer
h = DenseLayer(M_in, D, f=lambda x: x)
self.decoder_layers.append(h)
# get the logits
current_layer_value = self.Z
for layer in self.decoder_layers:
current_layer_value = layer.forward(current_layer_value)
logits = current_layer_value
posterior_predictive_logits = logits # save for later
# get the output
self.X_hat_distribution = Bernoulli(logits=logits)
# take samples from X_hat
# we will call this the posterior predictive sample
self.posterior_predictive = self.X_hat_distribution.sample()
self.posterior_predictive_probs = tf.nn.sigmoid(logits)
# take sample from a Z ~ N(0, 1)
# and put it through the decoder
# we will call this the prior predictive sample
standard_normal = Normal(
loc=np.zeros(M, dtype=np.float32),
scale=np.ones(M, dtype=np.float32)
)
Z_std = standard_normal.sample(1)
current_layer_value = Z_std
for layer in self.decoder_layers:
current_layer_value = layer.forward(current_layer_value)
logits = current_layer_value
prior_predictive_dist = Bernoulli(logits=logits)
self.prior_predictive = prior_predictive_dist.sample()
self.prior_predictive_probs = tf.nn.sigmoid(logits)
# prior predictive from input
# only used for generating visualization
self.Z_input = tf.placeholder(tf.float32, shape=(None, M))
current_layer_value = self.Z_input
for layer in self.decoder_layers:
current_layer_value = layer.forward(current_layer_value)
logits = current_layer_value
self.prior_predictive_from_input_probs = tf.nn.sigmoid(logits)
# now build the cost
if st is None:
kl = -tf.log(self.stddev) + 0.5*(self.stddev**2 + self.means**2) - 0.5
kl = tf.reduce_sum(kl, axis=1)
else:
kl = tf.reduce_sum(
tf.contrib.distributions.kl_divergence(
self.Z.distribution, standard_normal
),
1
)
expected_log_likelihood = tf.reduce_sum(
self.X_hat_distribution.log_prob(self.X),
1
)
# equivalent
# expected_log_likelihood = -tf.nn.sigmoid_cross_entropy_with_logits(
# labels=self.X,
# logits=posterior_predictive_logits
# )
# expected_log_likelihood = tf.reduce_sum(expected_log_likelihood, 1)
self.elbo = tf.reduce_sum(expected_log_likelihood - kl)
self.train_op = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(-self.elbo)
# set up session and variables for later
self.init_op = tf.global_variables_initializer()
self.sess = tf.InteractiveSession()
self.sess.run(self.init_op)
def fit(self, X, epochs=30, batch_sz=64):
costs = []
n_batches = len(X) // batch_sz
print("n_batches:", n_batches)
for i in range(epochs):
print("epoch:", i)
np.random.shuffle(X)
for j in range(n_batches):
batch = X[j*batch_sz:(j+1)*batch_sz]
_, c = self.sess.run((self.train_op, self.elbo), feed_dict={self.X: batch})
c /= batch_sz # just debugging
costs.append(c)
if j % 100 == 0:
print("iter: %d, cost: %.3f" % (j, c))
plt.plot(costs)
plt.show()
def transform(self, X):
return self.sess.run(
self.means,
feed_dict={self.X: X}
)
def prior_predictive_with_input(self, Z):
return self.sess.run(
self.prior_predictive_from_input_probs,
feed_dict={self.Z_input: Z}
)
def posterior_predictive_sample(self, X):
# returns a sample from p(x_new | X)
return self.sess.run(self.posterior_predictive, feed_dict={self.X: X})
def prior_predictive_sample_with_probs(self):
# returns a sample from p(x_new | z), z ~ N(0, 1)
return self.sess.run((self.prior_predictive, self.prior_predictive_probs))
def main():
X, Y = util.get_mnist()
# convert X to binary variable
X = (X > 0.5).astype(np.float32)
vae = VariationalAutoencoder(784, [200, 100])
vae.fit(X)
# plot reconstruction
done = False
while not done:
i = np.random.choice(len(X))
x = X[i]
im = vae.posterior_predictive_sample([x]).reshape(28, 28)
plt.subplot(1,2,1)
plt.imshow(x.reshape(28, 28), cmap='gray')
plt.title("Original")
plt.subplot(1,2,2)
plt.imshow(im, cmap='gray')
plt.title("Sampled")
plt.show()
ans = input("Generate another?")
if ans and ans[0] in ('n', 'N'):
done = True
# plot output from random samples in latent space
done = False
while not done:
im, probs = vae.prior_predictive_sample_with_probs()
im = im.reshape(28, 28)
probs = probs.reshape(28, 28)
plt.subplot(1,2,1)
plt.imshow(im, cmap='gray')
plt.title("Prior predictive sample")
plt.subplot(1,2,2)
plt.imshow(probs, cmap='gray')
plt.title("Prior predictive probs")
plt.show()
ans = input("Generate another?")
if ans and ans[0] in ('n', 'N'):
done = True
if __name__ == '__main__':
main()
``` |
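Both branches of the sampling code above reduce to the reparameterization trick, and the `st is None` branch of the cost uses the closed-form KL between a diagonal Gaussian and N(0, I). Both fit in a few lines of numpy:
```python
import numpy as np

rng = np.random.RandomState(0)
means = rng.randn(5)
stddev = np.abs(rng.randn(5)) + 1e-6

# reparameterization: z = mu + sigma * eps, with eps ~ N(0, 1),
# so gradients can flow through mu and sigma
eps = rng.randn(5)
z = means + stddev * eps

# KL( N(mu, sigma^2) || N(0, 1) ), matching the expression in the cost above
kl = (-np.log(stddev) + 0.5 * (stddev**2 + means**2) - 0.5).sum()
print(z.shape, kl)
```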
{
"source": "JounyWang/TwitterTools",
"score": 3
} |
#### File: JounyWang/TwitterTools/Fanfou.py
```python
import time
import sys
import os
import Config
from fanpy import *
from Twitter import update_status,update_with_media
reload(sys)
class Fan:
def __init__(self):
self.f = Fanfou(auth=OAuth(
Config.f_consumer_key,
Config.f_consumer_secret,
Config.f_access_token,
Config.f_access_token_secret))
def get_time(self):
return time.strftime('%Y-%m-%d %H:%M:%S ',time.localtime(time.time()))
def get_timeline(self):
msg_list=[]
msg_img_dic={}
timeline = self.f.statuses.user_timeline(_id='yihuwang',count=5)
for tl in timeline:
try:
in_reply_to_screen_name= tl['in_reply_to_screen_name']
except:
in_reply_to_screen_name=''
try:
repost_screen_name = tl['repost_screen_name']
except:
repost_screen_name = ''
if in_reply_to_screen_name:
continue
if repost_screen_name:
continue
msg = tl['text']
if msg in msg_set:
continue
if 'tw' not in msg:
continue
msg_set.add(msg)
if tl.get('photo'):
img_url = tl['photo']['largeurl']
msg_img_dic[msg] = img_url
else:
msg_list.append(msg)
return msg_list,msg_img_dic
def run(self):
while True:
print(self.get_time() + 'go on ...')
try:
msg_list,msg_img_dic = self.get_timeline()
update_status(msg_list)
update_with_media(msg_img_dic)
except Exception as e:
print self.get_time()+"failed ! \n"+ repr(e)
print self.get_time()+'sleep 10'
time.sleep(10)
if __name__ == '__main__':
msg_set=set()
fan = Fan()
fan.run()
``` |
{
"source": "JOUR491-NewsApplications/JOUR491-UNLSalaries",
"score": 2
} |
#### File: salaries/people/models.py
```python
from django.db import models
class Person(models.Model):
sort_id = models.IntegerField()
name = models.CharField(max_length=255)
name_slug = models.SlugField()
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
title = models.CharField(max_length=255, blank=True, null=True)
position_number = models.CharField(max_length=255, blank=True, null=True)
job_class_number = models.CharField(max_length=255, blank=True, null=True)
term = models.CharField(max_length=5, blank=True, null=True)
fte_status = models.FloatField()
salary = models.IntegerField()
def get_absolute_url(self):
return "http://127.0.0.1:8000/person/%s/" % self.name_slug
def __unicode__(self):
return "http://127.0.0.1:8000/" % self.name
```
#### File: salaries/people/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from people.models import Person
from haystack.query import SearchQuerySet
from forms import NotesSearchForm
def home(request):
names = Person.objects.order_by('name')
return render(request, 'index.html', {'names': names})
def personDetail(request, slug):
name = Person.objects.filter(name_slug=slug)
return render(request, 'persondetail.html', {'name': name })
def root(request):
"""
Search > Root
"""
form = NotesSearchForm(request.GET)
results = form.search()
search_query = request.GET.get('q', '')  # raw query string for the template
return render(request, 'search/search.html', {
'search_query' : search_query,
'notes' : results,
})
``` |
{
"source": "jouraOuss/sdia-python",
"score": 3
} |
#### File: tests/lab2/test_ball_window.py
```python
import numpy as np
import pytest
from sdia_python.lab2.ball_window import BallWindow
# def test_raise_type_error_when_something_is_called():
# with pytest.raises(TypeError):
# # call_something_that_raises_TypeError()
# pass
# ================================
# ==== WRITE YOUR TESTS BELOW ====
# ================================
@pytest.mark.parametrize(
"center, radius, expected",
[
([-1, 4], 1, "BallWindow: center=[-1.00 4.00], radius=1.00"),
([1.40, 1.10, 10], 0.70, "BallWindow: center=[1.40 1.10 10.00], radius=0.70"),
(
[0, 5, -1.45, 3.14],
np.sqrt(2),
"BallWindow: center=[0.00 5.00 -1.45 3.14], radius=1.41",
),
],
)
def test_ball_string_representation(center, radius, expected):
assert str(BallWindow(center, radius)) == expected
@pytest.fixture
def example_ball_window():
center = [2, 1]
radius = 2.0
return BallWindow(center, radius)
@pytest.mark.parametrize(
"points, expected", [([0, 0], False), ([0, 1], True), ([5, 6], False),],
)
def test_indicator(example_ball_window, points, expected):
ball = example_ball_window
assert ball.indicator(points) == expected
@pytest.mark.parametrize(
"ball, expected",
[
(BallWindow([0, 1], 1), np.pi),
(BallWindow([1, 0, 2], 3), 113.09733552923254),
(BallWindow([0, -4, 1, 3], 3), 399.718978244119),
],
)
def test_volume(ball, expected):
assert (ball.volume()) == expected
``` |
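The expected values in the volume test follow the n-ball formula V_n(r) = pi^(n/2) r^n / Gamma(n/2 + 1), which can be checked independently of the `BallWindow` implementation:
```python
import numpy as np
from scipy.special import gamma

def ball_volume(n, r):
    return np.pi ** (n / 2) * r ** n / gamma(n / 2 + 1)

print(ball_volume(2, 1))   # pi
print(ball_volume(3, 3))   # ~113.09733552923254
print(ball_volume(4, 3))   # ~399.718978244119
```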
{
"source": "jourdain/trame-ai-lenet-5",
"score": 2
} |
#### File: app/engine/main.py
```python
import numpy as np
import base64
import asyncio
from pathlib import Path
import multiprocessing
from functools import partial
from concurrent.futures import ProcessPoolExecutor
from . import ml, utils, charts
from trame import state, controller as ctrl
MULTI_PROCESS_MANAGER = None
PROCESS_EXECUTOR = None
PENDING_TASKS = []
# -----------------------------------------------------------------------------
# Initial state
# -----------------------------------------------------------------------------
TRAINING_INITIAL_STATE = {
"epoch_end": 0,
"model_state": {
"epoch": 0,
"training_accuracy": [],
"training_loss": [],
"validation_accuracy": [],
"validation_loss": [],
},
"xai_results": [],
}
state.update(
{
**TRAINING_INITIAL_STATE,
"training_running": False,
"prediction_success": False,
"prediction_available": False,
}
)
def initialize(**kwargs):
global MULTI_PROCESS_MANAGER, PROCESS_EXECUTOR
MULTI_PROCESS_MANAGER = multiprocessing.Manager()
PROCESS_EXECUTOR = ProcessPoolExecutor(1)
if ml.has_trained_model() and state.epoch_end == 0:
# Just load existing state
asyncio.create_task(training_add())
reset_model()
prediction_update()
# -----------------------------------------------------------------------------
# Methods to bound to UI
# -----------------------------------------------------------------------------
state.epoch_increase = 2 # we need a minimum of 2 points to plot progress
async def training_add():
"""Add 10 epoch to current training"""
await asyncio.gather(*PENDING_TASKS)
PENDING_TASKS.clear()
if state.model_state.get("epoch") >= state.epoch_end:
state.epoch_end += state.epoch_increase
loop = asyncio.get_event_loop()
queue = MULTI_PROCESS_MANAGER.Queue()
task_training = loop.run_in_executor(
PROCESS_EXECUTOR,
partial(ml.training_add, queue, state.epoch_end),
)
task_monitor = loop.create_task(utils.queue_to_state(queue, task_training))
# Only join on monitor task
PENDING_TASKS.append(task_monitor)
reset_model()
# -----------------------------------------------------------------------------
def training_reset():
"""Remove saved model and reset local state"""
ml.delete_model()
state.update(TRAINING_INITIAL_STATE)
reset_model()
# -----------------------------------------------------------------------------
def prediction_update():
image, label, prediction = ml.prediction_update()
image_path = Path(f"{ml.DATA_DIR}/{label}.jpg")
image.save(image_path)
with open(image_path, "rb") as file:
data = base64.encodebytes(file.read()).decode("utf-8")
state.prediction_input_url = f"data:image/jpeg;base64,{data}"
state.prediction_label = label
state.prediction_success = max(prediction) == prediction[label]
ctrl.chart_pred_update(charts.prediction_chart(prediction))
if state.xai_viz:
xai_run()
# -----------------------------------------------------------------------------
def _prediction_next_failure():
with state.monitor():
prediction_update()
if not state.prediction_success:
state.prediction_search_failure = False
if state.prediction_success and state.prediction_search_failure:
loop = asyncio.get_event_loop()
loop.call_later(0.01, _prediction_next_failure)
# -----------------------------------------------------------------------------
def xai_run():
try:
results = {}
model, image = ml.prediction_xai_params()
for xai_method in ml.SALIENCY_TYPES:
result = ml.xai_update(model, image, xai_method)
heatmaps = {}
data_range = [float(np.amin(result)), float(np.amax(result))]
for i in range(10):
heatmaps[f"{i}"] = result[i].ravel().tolist()
results[xai_method] = {"heatmaps": heatmaps, "range": data_range}
state.xai_results = results
except Exception:
pass # Model is not available...
# -----------------------------------------------------------------------------
def _testing_running():
with state.monitor():
matrix, sample_size = ml.testing_run()
ctrl.chart_confusion_matrix(charts.confusion_matrix_chart(matrix))
ctrl.chart_class_accuracy(charts.class_accuracy(matrix))
state.testing_count = sample_size
state.testing_running = False
# -----------------------------------------------------------------------------
def testing_run():
state.testing_running = True
loop = asyncio.get_event_loop()
loop.call_later(0.01, _testing_running)
# -----------------------------------------------------------------------------
def reset_model():
state.prediction_available = ml.prediction_reload()
state.testing_count = 0
# -----------------------------------------------------------------------------
# State listeners
# -----------------------------------------------------------------------------
@state.change("model_state")
def update_charts(model_state, **kwargs):
acc, loss = charts.acc_loss_charts(model_state)
ctrl.chart_acc_update(acc)
ctrl.chart_loss_update(loss)
@state.change("xai_viz_color_min", "xai_viz_color_max")
def update_xai_color_range(xai_viz_color_min, xai_viz_color_max, **kwargs):
state.xai_viz_color_range = [xai_viz_color_min, xai_viz_color_max]
@state.change("xai_viz")
def toggle_xai_viz(xai_viz, **kwargs):
if xai_viz:
xai_run()
@state.change("prediction_search_failure")
def toggle_search_failure(prediction_search_failure, **kwargs):
if prediction_search_failure:
_prediction_next_failure()
```
#### File: app/engine/utils.py
```python
import asyncio
from .ml import prediction_reload
from trame import state
async def queue_to_state(queue, *tasks):
_process_running = True
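    # Poll the inter-process queue: the training process pushes state
    # dicts to apply, and the string "stop" signals that it is done.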
while _process_running:
if queue.empty():
await asyncio.sleep(1)
else:
msg = queue.get_nowait()
if isinstance(msg, str):
# command
if msg == "stop":
_process_running = False
else:
# Need to monitor as we are outside of client/server update
with state.monitor():
# state update (dict)
state.update(msg)
await asyncio.gather(*tasks)
# Make sure we can go to prediction
state.prediction_available = prediction_reload()
state.testing_count = 0
state.flush("prediction_available", "testing_count")
``` |
{
"source": "JourdanClark/nova-agent",
"score": 2
} |
#### File: nova-agent/novaagent/novaagent.py
```python
from __future__ import absolute_import
import argparse
import logging
import time
import sys
import os
from pyxs.connection import XenBusConnection
from pyxs.client import Client
from novaagent.xenbus import XenGuestRouter
from novaagent.libs import alpine
from novaagent.libs import centos
from novaagent.libs import debian
from novaagent.libs import redhat
from novaagent import utils
log = logging.getLogger(__name__)
# Connect to Xenbus in order to interact with xenstore
XENBUS_ROUTER = XenGuestRouter(XenBusConnection())
def action(server_os, client=None):
# Return whether or not to trigger init notification
trigger_notify = False
xen_events = utils.list_xen_events(client)
if len(xen_events) == 0:
# if no xen_events then trigger the notification
trigger_notify = True
for uuid in xen_events:
event = utils.get_xen_event(uuid, client)
log.info('Event: {0} -> {1}'.format(uuid, event['name']))
command_return = ('', '')
if hasattr(server_os, event['name']):
run_command = getattr(server_os, event['name'])
command_return = run_command(event['name'], event['value'], client)
utils.remove_xenhost_event(uuid, client)
message = command_return[1]
return_code = command_return[0]
if command_return[0] == '':
return_code = '0'
utils.update_xenguest_event(
uuid,
{'message': message, 'returncode': return_code},
client
)
log.info(
'Returning {{"message": "{0}", "returncode": "{1}"}}'.format(
message,
return_code
)
)
if event['name'] == 'resetnetwork':
# If the network has completed setup then trigger the notification
trigger_notify = True
return trigger_notify
def nova_agent_listen(server_type, server_os, notify, server_init):
log.info('Starting actions for {0}'.format(server_type.__name__))
log.info('Checking for existence of /dev/xen/xenbus')
send_notification = True
notify_init = False
if os.path.exists('/dev/xen/xenbus'):
with Client(router=XENBUS_ROUTER) as xenbus_client:
check_provider(utils.get_provider(client=xenbus_client))
while True:
notify_init = action(server_os, client=xenbus_client)
if send_notification and notify_init:
log.info('Sending notification startup is complete')
utils.send_notification(server_init, notify)
send_notification = False
time.sleep(1)
else:
check_provider(utils.get_provider())
while True:
notify_init = action(server_os)
if send_notification and notify_init:
log.info('Sending notification startup is complete')
utils.send_notification(server_init, notify)
send_notification = False
time.sleep(1)
def get_server_type():
server_type = None
if os.path.exists('/etc/alpine-release'):
server_type = alpine
elif (
os.path.exists('/etc/centos-release') or
os.path.exists('/etc/fedora-release')
):
server_type = centos
elif os.path.exists('/etc/redhat-release'):
server_type = redhat
elif os.path.exists('/etc/debian_version'):
server_type = debian
return server_type
def get_init_system():
# Checking for systemd on OS
try:
os.stat('/run/systemd/system')
log.debug('Systemd is the init system')
return 'systemd'
except Exception:
pass
# Check if upstart system was used to start agent
upstart_job = os.environ.get('UPSTART_JOB', None)
    # RHEL 6 and CentOS 6 use the 'rc' upstart job to start the SysVinit
    # emulation layer instead of individual init scripts
if upstart_job not in (None, 'rc'):
log.debug('Upstart job that started script: {0}'.format(upstart_job))
return 'upstart'
# return None and no notifications to init system will occur
log.debug('SysVinit is the init system')
return None
def create_parser():
parser = argparse.ArgumentParser(description='Args for novaagent')
parser.add_argument(
'-l',
dest='loglevel',
type=str,
default='info',
choices=('warning', 'info', 'debug'),
help='log level'
)
parser.add_argument(
'-o',
dest='logfile',
default='-',
type=str,
help='path to log file'
)
    parser.add_argument(
        '--no-fork',
        dest='no_fork',
        default=False,
        action='store_true',
        help='Do not perform os.fork when starting agent'
    )
return parser
def check_provider(provider):
if provider is None or provider.lower() != 'rackspace':
log.error('Invalid provider for instance, agent will exit')
os._exit(1)
return
def main():
parser = create_parser()
args = parser.parse_args()
loglevel = getattr(logging, args.loglevel.upper())
log_format = "%(asctime)s [%(levelname)-5.5s] %(message)s"
if args.logfile == '-':
logging.basicConfig(
stream=sys.stdout,
level=loglevel,
format=log_format
)
else:
logging.basicConfig(
filename=args.logfile,
level=loglevel,
format=log_format
)
log.info('Agent is starting up')
server_type = get_server_type()
server_os = server_type.ServerOS()
server_init = get_init_system()
if args.no_fork is False:
log.info('Starting daemon')
try:
pid = os.fork()
if pid > 0:
log.info('PID: {0}'.format(pid))
os._exit(0)
except OSError as error:
log.error(
'Unable to fork. Error: {0} {1}'.format(
error.errno,
error.strerror
)
)
os._exit(1)
else:
log.info('Skipping os.fork as directed by arguments')
notify = None
if server_init == 'systemd':
notify = utils.notify_socket()
nova_agent_listen(server_type, server_os, notify, server_init)
if __name__ == '__main__':
main()
```
#### File: novaagent/xenstore/xenstore.py
```python
from subprocess import PIPE
from subprocess import Popen
import json
def xenstore_read(path, client, to_json=False):
result = None
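    # Two code paths: shell out to the xenstore-read CLI when no pyxs
    # client is provided, otherwise read through the XenBus connection.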
if client is None:
p = Popen(
['xenstore-read', path],
stdout=PIPE,
stderr=PIPE
)
output, _ = p.communicate()
if p.returncode == 0:
result = output.decode('utf-8').strip()
else:
result = client.read(path).decode('utf-8').strip()
if result and to_json:
return json.loads(result)
return result
def xenstore_list(path, client):
result = []
if client is None:
p = Popen(
['xenstore-ls', path],
stdout=PIPE,
stderr=PIPE
)
out, _ = p.communicate()
if p.returncode == 0:
decoded_out = out.decode('utf-8').split('\n')
result = [
item.split(' = ')[0] for item in decoded_out if item
]
else:
for item in client.list(path):
result.append(item.decode('utf-8').strip())
return result
def xenstore_write(write_path, write_value, client):
if client is None:
p = Popen(
['xenstore-write', write_path, write_value],
stdout=PIPE,
stderr=PIPE
)
p.communicate()
if p.returncode != 0:
raise ValueError(
'Shell to xenstore-write returned invalid code {0}'.format(
p.returncode
)
)
else:
client.write(write_path, write_value)
return
def xenstore_delete(path, client):
if client is None:
p = Popen(
['xenstore-rm', path],
stdout=PIPE,
stderr=PIPE
)
_out, _err = p.communicate()
if p.returncode != 0:
raise ValueError(
'Shell to xenstore-rm returned invalid code {0}'.format(
p.returncode
)
)
else:
client.delete(path)
return
```
#### File: nova-agent/tests/tests_xenbus.py
```python
from pyxs.client import Client
from pyxs.connection import XenBusConnection
from novaagent import xenbus
import logging
import sys
if sys.version_info[:2] >= (2, 7):
from unittest import TestCase
else:
from unittest2 import TestCase
class TestXenBus(TestCase):
def setUp(self):
logging.disable(logging.ERROR)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_init(self):
c = Client(router=xenbus.XenGuestRouter(XenBusConnection()))
assert isinstance(c.router.connection, XenBusConnection)
assert not c.router.thread.is_alive()
``` |
{
"source": "JouriB/Casimir-programming",
"score": 4
} |
#### File: JouriB/Casimir-programming/script.py
```python
print('Good Morning')
r=10
import matplotlib.pyplot as plt
import test
print('Radius =',r)
print('Circumference =',test.circ(r))
print('Surface =',test.surf(r))
import numpy as np
def y(r, x):
    return np.sqrt(r**2 - x**2)
plt.plot([0,1,2],[3,4,5])
``` |
{
"source": "journalprivacyconfidentiality/Accuracy-First-Differential-Privacy",
"score": 3
} |
#### File: Accuracy-First-Differential-Privacy/code/covar.py
```python
import numpy as np
import math
# our files
import sparsevec
import noise_reduc
import ridge
import theory
# get a function f(data) -> excess error of beta_hat on data
# where data is a 3-tuple: X,Y,opt_err
# and compute_err_func(X, Y, beta_hat) -> error
def get_excess_err_func(beta_hat, compute_err_func):
def f(data):
X, Y, opt_err = data
err = compute_err_func(X, Y, beta_hat)
return err - opt_err
return f
# get an upper bound on the epsilon we need to achieve alpha
def get_max_epsilon(n, lamb, alpha, gamma, dim, max_norm):
eps_for_expected = theory.covar_get_epsilon(alpha, n, dim, max_norm)
return 2.0 * eps_for_expected
# compute the list of beta_hats for noise reduction
def construct_beta_hats(Sigma, R, eps_list, max_norm):
# to be eps-private, we need each Sigma and R to be (eps/2)-private
halved_eps_list = [eps/2.0 for eps in eps_list]
sigma_sensitivity = 2.0
Sigma_hats = noise_reduc.gen_list(Sigma, sigma_sensitivity, halved_eps_list)
r_sensitivity = 2.0
R_hats = noise_reduc.gen_list(R, r_sensitivity, halved_eps_list)
beta_hats = np.array([ridge.exact_solve_and_project(S_hat, R_hat, max_norm) for S_hat,R_hat in zip(Sigma_hats, R_hats)])
return beta_hats
# Return: beta_hat, algo_res
# where algo_res is a tuple containing:
# success (True or False)
# excess error
# sparsevec epsilon
# noise-reduc epsilon
# stopping index (number of AboveThresh iterations)
def run_covar(Sigma, R, alpha, gamma, max_norm, max_steps, min_eps, max_eps, sv_sens, data, compute_err_func):
sv_thresh, sv_eps = sparsevec.compute_epsilon(alpha, gamma, sv_sens, max_steps, True)
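    # Geometrically spaced privacy budgets: max_steps values growing by a
    # constant ratio from min_eps toward max_eps.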
const = (max_eps / min_eps)**(1.0 / max_steps)
eps_list = np.array([min_eps * const**k for k in range(max_steps)])
# eps_list = np.linspace(min_eps, max_eps, max_steps)
beta_hats = construct_beta_hats(Sigma, R, eps_list, max_norm)
queries = np.array([get_excess_err_func(b, compute_err_func) for b in beta_hats])
stop_ind = sparsevec.below_threshold(data, queries, sv_thresh, sv_eps, sv_sens)
if stop_ind == -1: # failure
return beta_hats[-1], (False, queries[-1](data), -1, -1, -1)
else:
return beta_hats[stop_ind], (True, queries[stop_ind](data), sv_eps, eps_list[stop_ind], stop_ind)
```
#### File: Accuracy-First-Differential-Privacy/code/logist.py
```python
import math
import numpy as np
#from cvxpy import *
from sklearn.linear_model import LogisticRegression
MAX_ERR = 1000000000
# ---------------------------------------------
# Max norm of optimal hypothesis when this is the regularization param
def compute_max_norm(lamb):
return math.sqrt(2.0*math.log(2.0) / lamb)
# Sensitivity of a query to sparse vector
def get_sv_sensitivity(max_norm, n):
return math.log(1.0 + math.exp(max_norm)) / n
# L1-sensitivity of the optimal beta
def compute_opt_sensitivity(n, dim, lamb):
return 2.0 * math.sqrt(dim) / (n*lamb)
# ---------------------------------------------
def compute_err(X, Y, lamb, beta):
n = len(X)
total = 0.0
try:
for i in range(n):
total += math.log(1.0 + math.exp(-Y[i] * np.dot(X[i], beta)))
except OverflowError:
return MAX_ERR
avg = total / n
twonorm = np.dot(beta, beta)
return avg + 0.5 * lamb * twonorm
# ---------------------------------------------
def logistic_regression(X, Y, lamb):
C = 1.0 / (lamb * len(X))
lr = LogisticRegression(penalty="l2", C=C, fit_intercept=False)
lr.fit(X, Y)
beta = np.array(lr.coef_[0])
return beta, compute_err(X, Y, lamb, beta)
# Problem: this took too much memory (5+GB on a 120MB input data file)
# Input:
# X and Y are numpy arrays,
# X is dim by n
# Y is 1 by n, each entry is {-1, +1}
# lamb is regularization constant
#
# Output:
# optimal hypothesis beta
# value of its solution (optimal regularized error)
#def cvxpy_logistic_regression(X, Y, lamb):
# n = len(Y)
# d = X.shape[1]
# beta = Variable(d)
# # this version threw an error for me
# #expr1 = sum([log_sum_exp(vstack(0, -Y[i] * np.dot(X[i, :], beta))) for i in range(n)])
# expr1 = sum(logistic(X[i,:].T*beta*-Y[i]) for i in range(n))
# expr1 = conv(1.0 / float(n), sum(expr1))
# expr2 = square(norm(beta))
# expr2 = conv(lamb, expr2)
# expr_list = [expr1, expr2]
# expr = sum(expr_list)
# obj = Minimize(expr)
#
# p = Problem(obj)
# p.solve()
#
# beta_arr = np.array([b[0,0] for b in beta])
# return beta, p.value
#
```
#### File: Accuracy-First-Differential-Privacy/code/run_many_ridge.py
```python
import sys
import os
import math
import random
import numpy as np
import matplotlib.pyplot as plt
# our implementations
import run_ridge
import ridge
import theory
import covar
import output_pert
import naive_covar
#MIN_EPS = 0.00001
#MAX_EPS_CAP = 20.0
#MAX_NAIVE_EPS = 1000.0 # this one can be larger due to doubling
usage_str = """
Usage: python3 run_many_ridge.py datafilename lambda alpha gamma max_norm max_steps num_trials outputdir
Runs 'num_trials' separate trials, writing the output to the files
outputdir/trial_1.txt, outputdir/trial_2.txt, ....
--------
For other parameters:
""" + run_ridge.usage_str
def main(X, Y, lamb, alpha, gamma, max_norm, max_steps, num_trials, outputdir):
n = len(X)
dim = len(X[0])
if max_norm <= 0.0:
max_norm = ridge.compute_max_norm(lamb)
sv_sens = ridge.get_sv_sensitivity(max_norm, n)
opt_beta_sens = ridge.compute_opt_sensitivity(n, dim, lamb, max_norm)
compute_err_func = lambda X,Y,beta_hat: ridge.compute_err(X, Y, lamb, beta_hat)
# Compute opt
Sigma, R, opt_beta, opt_res = run_ridge.get_matrices_and_opt(X, Y, lamb)
opt_err = opt_res[0]
data = (X, Y, opt_err)
min_eps = 1.0 / n
max_covar_eps = 4.0 * theory.covar_get_epsilon(alpha, n, dim, max_norm)
max_naive_eps = max_covar_eps
max_output_eps = 4.0 * theory.output_pert_linreg_get_epsilon(alpha, n, dim, lamb, max_norm)
# Create output folder and write value of alpha
os.makedirs(outputdir)
with open(outputdir + "/alpha.txt", "w") as f:
f.write(str(alpha) + "\n")
# Compute results of methods and save them
for trial_ind in range(num_trials):
covar_beta_hat, covar_res = covar.run_covar(Sigma, R, alpha, gamma, max_norm, max_steps, min_eps, max_covar_eps, sv_sens, data, compute_err_func)
output_beta_hat, output_res = output_pert.run_output_pert(opt_beta, alpha, gamma, max_norm, max_steps, min_eps, max_output_eps, sv_sens, opt_beta_sens, data, compute_err_func)
naive_beta_hat, naive_res = naive_covar.run_naive(Sigma, R, alpha, gamma, max_norm, min_eps, max_naive_eps, sv_sens, data, compute_err_func)
with open(outputdir + "/trial_" + str(trial_ind+1) + ".txt", "w") as f:
f.write(run_ridge.stringify(opt_res))
f.write("\n")
f.write(run_ridge.stringify(opt_beta))
f.write("\n")
for beta, res in [(covar_beta_hat, covar_res), (output_beta_hat, output_res), (naive_beta_hat, naive_res)]:
success, excess_err, sv_eps, my_eps, index = res
two_norm = np.linalg.norm(beta)
mse = ridge.compute_err(X, Y, 0.0, beta)
f.write(run_ridge.stringify(("1" if success else "0", excess_err, sv_eps, my_eps, index, two_norm, mse)))
f.write("\n")
f.write(run_ridge.stringify(beta))
f.write("\n")
# when run as script, read parameters from input
# (other python scripts can call main(), above, directly)
if __name__ == "__main__":
X, Y, lamb, alpha, gamma, max_norm, max_steps = run_ridge.parse_inputs(sys.argv)
try:
num_trials = int(sys.argv[7])
outputdir = sys.argv[8]
except:
print(usage_str)
exit(0)
main(X, Y, lamb, alpha, gamma, max_norm, max_steps, num_trials, outputdir)
```
#### File: Accuracy-First-Differential-Privacy/code/sgd_ridge.py
```python
import math
import random
import numpy as np
import project
# Only accept if noisy error is below THRESHOLD_FRAC * alpha
# (and bound the chance that any noise variable exceeds (1 - THRESHOLD_FRAC)*alpha)
THRESHOLD_FRAC = 0.5
def compute_variance(n, epsilon, delta, lipschitz):
num = 32.0 * lipschitz**2.0 * n**2.0 * math.log(n/delta) * math.log(1/delta)
den = epsilon**2.0
return num / den
# data is a 3-tuple: X,Y,opt_err
def get_excess_err(beta_hat, compute_err_func, data):
X, Y, opt_err = data
err = compute_err_func(X, Y, beta_hat)
return err - opt_err
#def loss_subgradient(beta, x, y, n, lamb):
# c = np.dot(beta, x) - y
# return np.array([c*x[i] + (lamb/n)*beta[i] for i in range(len(beta))])
def sgd_ridge(data, epsilon, delta, lamb, max_norm):
X, Y, opt_err = data
n = len(X)
lipschitz = max_norm + 1.0 + max_norm*lamb
strong_convexity = lamb
variance = compute_variance(n, epsilon, delta, lipschitz)
sigma = math.sqrt(variance)
beta = np.zeros(len(X[0]))
indices = range(n)
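    # Noisy projected SGD: ~n**2 steps with step size 1/(lambda*t), the
    # standard schedule for a lambda-strongly-convex objective.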
for t in range(1, n**2):
data_ind = random.choice(indices)
x, y = X[data_ind], Y[data_ind]
noise = sigma * np.random.normal(size=(len(beta)))
eta = 1.0 / (t * strong_convexity)
# subtract eta * (subgradient + noise)
c1 = np.dot(beta, x) - y
c2 = lamb / n
for i in range(len(beta)):
beta[i] -= eta * (c1*x[i] + c2*beta[i] + noise[i])
# project back into max_norm
curr_norm = np.linalg.norm(beta)
if curr_norm > max_norm:
ratio = max_norm / curr_norm
for i in range(len(beta)):
beta[i] = ratio*beta[i]
return beta
# return threshold, epsilon so that
# with prob 1-gamma, max of num_steps independent Laplaces(sensitivity/eps)
# is at most alpha-threshold
def compute_test_epsilon(alpha, gamma, sens, num_steps):
thresh = alpha * THRESHOLD_FRAC
# Laplace parameter
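    # For X ~ Laplace(b), P(X > t) = 0.5*exp(-t/b); a union bound over
    # num_steps draws gives b = (alpha - thresh) / log(num_steps/(2*gamma)).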
b = (alpha - thresh) / math.log(num_steps / (2.0*gamma))
eps = sens / b
return thresh, eps
# compute_err_func is the loss function,
# see "get_excess_err_func" above
def run_naive_sgd(opt_beta, alpha, gamma, max_norm, min_eps, max_eps, sv_sens, data, compute_err_func, lamb):
max_step = int(math.log2(max_eps / min_eps) + 1.0)
test_thresh, test_eps = compute_test_epsilon(alpha, gamma, sv_sens, max_step+1.0)
eps_list = np.array([min_eps * 2.0**k for k in range(max_step+1)])
beta_hat = opt_beta
excess_err = 0.0
result = -1
per_step_delta = 1.0 / (len(data[0]) * len(eps_list))
for t,eps in enumerate(eps_list):
beta_hat = sgd_ridge(data, eps, per_step_delta, lamb, max_norm)
excess_err = get_excess_err(beta_hat, compute_err_func, data)
if excess_err + np.random.laplace(scale=sv_sens/test_eps) <= test_thresh:
result = t
break
if result == -1: # failure
return opt_beta, (False, excess_err, -1, -1, len(eps_list))
else:
return beta_hat, (True, excess_err, test_eps*(result+1), sum(eps_list[:result+1]), result+1)
```
#### File: datasets/KDDcup99/process_kdd.py
```python
import sys, os
import math
import numpy as np
#import matplotlib.pyplot as plt
col_1_attrs = ['udp', 'tcp', 'icmp']
col_2_attrs = ['ssh', 'login', 'nntp', 'iso_tsap', 'pm_dump', 'Z39_50', 'eco_i', 'ftp', 'pop_3', 'rje', 'ctf', 'nnsp', 'courier', 'uucp', 'whois', 'daytime', 'smtp', 'kshell', 'sunrpc', 'uucp_path', 'telnet', 'vmnet', 'X11', 'discard', 'urp_i', 'shell', 'netstat', 'hostnames', 'http_443', 'http', 'ecr_i', 'auth', 'red_i', 'netbios_ssn', 'netbios_dgm', 'mtp', 'domain_u', 'printer', 'efs', 'time', 'urh_i', 'tim_i', 'other', 'ldap', 'domain', 'name', 'ftp_data', 'klogin', 'link', 'sql_net', 'private', 'pop_2', 'imap4', 'IRC', 'remote_job', 'exec', 'csnet_ns', 'tftp_u', 'bgp', 'echo', 'finger', 'ntp_u', 'supdup', 'systat', 'gopher', 'netbios_ns']
col_3_attrs = ['S1', 'S3', 'REJ', 'RSTOS0', 'OTH', 'SH', 'RSTR', 'SF', 'S2', 'S0', 'RSTO']
last_col_attrs = ['normal.', 'phf.', 'ipsweep.', 'spy.', 'pod.', 'smurf.', 'ftp_write.', 'satan.', 'land.', 'multihop.', 'imap.', 'teardrop.', 'neptune.', 'warezmaster.', 'warezclient.', 'loadmodule.', 'buffer_overflow.', 'nmap.', 'guess_passwd.', 'back.', 'perl.', 'rootkit.', 'portsweep.']
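# log(1 + c) compresses the large-range numeric columns (applied to
# columns 0-2 of the reduced feature vector below).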
def log_transform(c):
return math.log(1.0 + c)
X = []
Y = []
with open("kddcup.data_10_percent") as f:
for line in f.readlines():
words = line.strip().split(",")
words2 = [words[0]] + words[4:-1] + ["-1" if words[-1]=="normal." else "1"]
nums = list(map(float, words2))
for ind in [0, 1, 2]:
nums[ind] = log_transform(nums[ind])
X.append(nums[:-1])
Y.append(nums[-1])
X = np.array(X)
Y = np.array(Y)
x_norms = [sum(list(map(abs, x))) for x in X]
max_x_norm = max(x_norms)
y_norms = list(map(abs,Y))
max_y_norm = max(y_norms)
print("n = " + str(len(X)))
print("d = " + str(len(X[0])))
print("")
print("X norms:")
print("Max 1-norm: " + str(max_x_norm))
print("Average 1-norm: " + str(sum(x_norms)/len(x_norms)))
x_norms.sort()
print("Median 1-norm: " + str(x_norms[int(len(x_norms)/2)]))
print("")
print("Y norms:")
print("Max 1-norm: " + str(max_y_norm))
print("Average 1-norm: " + str(sum(y_norms)/len(y_norms)))
y_norms.sort()
print("Median 1-norm: " + str(y_norms[int(len(y_norms)/2)]))
#print("Maximum value of each column of X:")
#print([max(X[:,i]) for i in range(len(X[0]))])
#plt.figure()
#plt.plot(y_norms,np.linspace(0,1,len(y_norms)))
#plt.title("Y 1-norm CDF")
#plt.show()
with open("kddcup_dataset.txt", "w") as f:
for x,y in zip(X,Y):
        xhat = [xi/max_x_norm for xi in x]
yhat = y/max_y_norm
f.write(" ".join(list(map(str, xhat))))
f.write(" ")
f.write(str(yhat))
f.write("\n")
``` |
{
"source": "journalprivacyconfidentiality/private-pgm-jpc-778",
"score": 2
} |
#### File: src/mbi/mixture_inference.py
```python
from mbi import Dataset, Factor, CliqueVector
from scipy.optimize import minimize
from collections import defaultdict
import numpy as np
from scipy.special import softmax
from functools import reduce
from scipy.sparse.linalg import lsmr
""" This file is experimental.
It is a close approximation to the method described in RAP (https://arxiv.org/abs/2103.06641)
and an even closer approximation to RAP^{softmax} (https://arxiv.org/abs/2106.07153)
Notable differences:
- Code now shares the same interface as Private-PGM (see FactoredInference)
- Named model "MixtureOfProducts", as that is one interpretation for the relaxed tabular format
(at least when softmax is used).
- Added support for unbounded-DP, with automatic estimate of total.
"""
def estimate_total(measurements):
# find the minimum variance estimate of the total given the measurements
variances = np.array([])
estimates = np.array([])
for Q, y, noise, proj in measurements:
o = np.ones(Q.shape[1])
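        # Solve Q^T v = 1 via lsmr; then v @ y is an unbiased estimate of
        # the total with variance noise**2 * ||v||**2.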
v = lsmr(Q.T, o, atol=0, btol=0)[0]
if np.allclose(Q.T.dot(v), o):
variances = np.append(variances, noise**2 * np.dot(v, v))
estimates = np.append(estimates, np.dot(v, y))
if estimates.size == 0:
return 1
else:
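        # Inverse-variance weighting: the minimum-variance combination of
        # independent unbiased estimates weights each by 1/variance.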
variance = 1.0 / np.sum(1.0 / variances)
estimate = variance * np.sum(estimates / variances)
return max(1, estimate)
def adam(loss_and_grad, x0, iters=250):
a = 1.0
b1, b2 = 0.9, 0.999
eps = 10e-8
x = x0
m = np.zeros_like(x)
v = np.zeros_like(x)
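    # Standard Adam: m and v are biased first/second moment estimates,
    # bias-corrected below by the (1 - b**t) factors.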
for t in range(1, iters+1):
l, g = loss_and_grad(x)
#print(l)
m = b1 * m + (1- b1) * g
v = b2 * v + (1 - b2) * g**2
mhat = m / (1 - b1**t)
vhat = v / (1 - b2**t)
x = x - a * mhat / (np.sqrt(vhat) + eps)
# print np.linalg.norm(A.dot(x) - y, ord=2)
return x
class ProductDist:
def __init__(self, factors, domain, total):
"""
:param factors: a list of factors
defined over disjoint subsets of attributes
:param domain: the domain object
:param total: known or estimated total
"""
self.factors = factors
self.domain = domain
self.total = total
def project(self, cols):
domain = self.domain.project(cols)
factors = { col : self.factors[col] for col in cols }
return ProductDist(factors, domain, self.total)
def datavector(self, flatten=True):
ans = reduce(lambda x,y: x*y, self.factors.values(), 1.0)
ans = ans.transpose(self.domain.attrs)
return ans.datavector(flatten) * self.total
class MixtureOfProducts:
def __init__(self, products):
self.products = products
self.domain = products[0].domain
self.total = sum(P.total for P in products)
def project(self, cols):
return MixtureOfProducts([P.project(cols) for P in self.products])
def datavector(self, flatten=True):
return sum(P.datavector(flatten) for P in self.products)
class MixtureInference:
def __init__(self, domain, components=10, metric='L2'):
"""
:param domain: A Domain object
:param components: The number of mixture components
:metric: The metric to use for the loss function (can be callable)
"""
self.domain = domain
self.components = components
self.metric = metric
def estimate(self, measurements, total=None, alpha=0.1):
        if total is None:
total = estimate_total(measurements)
self.measurements = measurements
cliques = [M[-1] for M in measurements]
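        # Each mixture component is a product distribution: one softmax
        # over every attribute's domain, with all parameters packed flat.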
def get_model(params):
idx = 0
products = []
for _ in range(self.components):
factors = {}
for col in self.domain:
n = self.domain[col]
vals = softmax(params[idx:idx+n])
idx += n
factors[col] = Factor(self.domain.project(col), vals)
products.append(ProductDist(factors, self.domain, total/self.components))
return MixtureOfProducts(products)
def loss_and_grad(params):
# first create the model
model = get_model(params)
# Now calculate the necessary marginals
mu = CliqueVector.from_data(model, cliques)
loss, dL = self._marginal_loss(mu)
# Now back-propagate gradient to params
dparams = np.zeros(params.size) # what to do with total?
for cl in dL:
idx = 0
for i in range(self.components):
submodel = model.products[i]
mui = Factor(self.domain.project(cl), submodel.project(cl).datavector())
tmp = dL[cl] * mui
for col in self.domain:
n = self.domain[col]
if col in cl:
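                            # Back-prop through softmax: with p = softmax(theta)
                            # and upstream gradient g, J^T g = p*g - p*(p @ g).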
dpij = (tmp / submodel.factors[col]).project([col]).datavector()
pij = submodel.factors[col].datavector()
dparams[idx:idx+n] += dpij * pij - pij*(pij @ dpij)
idx += n
return loss, dparams
params = np.random.normal(loc=0, scale=0.25, size=sum(self.domain.shape) * self.components)
params = adam(loss_and_grad, params)
return get_model(params)
def _marginal_loss(self, marginals, metric=None):
""" Compute the loss and gradient for a given dictionary of marginals
:param marginals: A dictionary with keys as projections and values as Factors
:return loss: the loss value
:return grad: A dictionary with gradient for each marginal
"""
if metric is None:
metric = self.metric
if callable(metric):
return metric(marginals)
loss = 0.0
gradient = { cl : Factor.zeros(marginals[cl].domain) for cl in marginals }
for Q, y, noise, cl in self.measurements:
mu = marginals[cl]
c = 1.0/noise
x = mu.datavector()
diff = c*(Q @ x - y)
if metric == 'L1':
loss += abs(diff).sum()
sign = diff.sign() if hasattr(diff, 'sign') else np.sign(diff)
grad = c*(Q.T @ sign)
else:
loss += 0.5*(diff @ diff)
grad = c*(Q.T @ diff)
gradient[cl] += Factor(mu.domain, grad)
return float(loss), CliqueVector(gradient)
``` |
{
"source": "Journera/glutil",
"score": 2
} |
#### File: glutil/tests/partitioner_test.py
```python
from unittest import TestCase
from unittest.mock import MagicMock, call, ANY
from moto import mock_s3, mock_glue
from .helper import GlueHelper
import boto3
import pendulum
import sure # noqa: F401
from glutil import Partitioner, Partition, GlutilError
class PartitionerTest(TestCase):
bucket = "test-bucket"
database = "test_database"
table = "test_table"
region = "us-east-1"
def setUp(self):
super().setUp()
self.helper = GlueHelper(
default_bucket=self.bucket,
default_database=self.database,
default_table=self.table)
self.glue = boto3.client("glue", region_name=self.region)
self.s3 = boto3.client("s3", region_name=self.region)
@mock_glue
def test_init(self):
"""Confirm Partitioner.__init__ grabs table data from AWS"""
self.helper.make_database_and_table()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partitioner.bucket.should.equal(self.bucket)
partitioner.prefix.should.equal("test_table/")
@mock_glue
def test_init_no_table(self):
"""Partitioner.__init__ should raise an error when no table exists"""
with self.assertRaises(GlutilError) as context:
Partitioner(self.database, self.table, aws_region=self.region)
exception = context.exception
exception.error_type.should.equal("EntityNotFound")
@mock_glue
@mock_s3
def test_find_partitions_in_s3(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitions = self.helper.create_many_partitions(count=10)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk()
set(found_partitions).should.equal(set(partitions))
@mock_glue
@mock_s3
def test_find_partitions_in_s3_with_hive_formatted_partitions(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
# partitions = self.helper.create_many_partitions(count=10)
partitions = []
for i in range(1, 11):
partition = Partition(["2019", "01", f"{i:02d}", "03"], f"s3://{self.bucket}/{self.table}/year=2019/month=01/day={i:02d}/hour=03/")
print(partition.location)
self.helper.write_partition_to_s3(partition)
partitions.append(partition)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk()
set(found_partitions).should.equal(set(partitions))
@mock_glue
@mock_s3
def test_find_partitions_with_limit_hive_format(self):
"""Partitioner.partitions_on_disk() with limit days set should work on hive-formatted partitions"""
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
today = pendulum.now()
partitions = []
for i in range(1, 11):
partition_date = today.subtract(days=i)
year = partition_date.strftime("%Y")
month = partition_date.strftime("%m")
day = partition_date.strftime("%d")
hour = "03"
partition = Partition([year, month, day, hour], f"s3://{self.bucket}/{self.table}/year={year}/month={month}/day={day}/hour={hour}/")
self.helper.write_partition_to_s3(partition)
partitions.append(partition)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk(limit_days=7)
found_partitions.should.have.length_of(7)
set(found_partitions).should.equal(set(partitions[0:7]))
@mock_glue
@mock_s3
def test_find_partitions_with_limit_hive_format_capital_keys(self):
"""Partitioner.partitions_on_disk() with limit days set should work on hive-formatted partitions where they keys are not lowercase"""
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database()
self.helper.make_table(partition_keys=[
{"Name": "Year", "Type": "int"},
{"Name": "Month", "Type": "int"},
{"Name": "Day", "Type": "int"},
{"Name": "Hour", "Type": "int"},
])
today = pendulum.now()
partitions = []
for i in range(1, 11):
partition_date = today.subtract(days=i)
year = partition_date.strftime("%Y")
month = partition_date.strftime("%m")
day = partition_date.strftime("%d")
hour = "03"
partition = Partition([year, month, day, hour], f"s3://{self.bucket}/{self.table}/Year={year}/Month={month}/Day={day}/Hour={hour}/")
self.helper.write_partition_to_s3(partition)
partitions.append(partition)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk(limit_days=7)
print(found_partitions)
print(partitions[0:7])
found_partitions.should.have.length_of(7)
set(found_partitions).should.equal(set(partitions[0:7]))
@mock_glue
@mock_s3
def test_find_partitions_with_limit_flat_format(self):
"""Partitioner.partitions_on_disk() with limit days set should work with flat-formatted partitions"""
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
today = pendulum.now()
partitions = []
for i in range(1, 11):
partition_date = today.subtract(days=i)
year = partition_date.strftime("%Y")
month = partition_date.strftime("%m")
day = partition_date.strftime("%d")
hour = "03"
partition = Partition([year, month, day, hour], f"s3://{self.bucket}/{self.table}/{year}/{month}/{day}/{hour}/")
self.helper.write_partition_to_s3(partition)
partitions.append(partition)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk(limit_days=4)
found_partitions.should.have.length_of(4)
set(found_partitions).should.equal(set(partitions[0:4]))
@mock_glue
@mock_s3
def test_find_partitions_with_bad_limit(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partitioner.partitions_on_disk.when.called_with(limit_days=-1).should.throw(ValueError)
@mock_glue
@mock_s3
def test_find_partitions_with_limit_days_and_prefix(self):
"""Partitioner.partitions_on_disk() with limit_days and prefix_partitions should find preceding partitions with hive-format names"""
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database()
self.helper.make_table(partition_keys=[
{"Name": "region", "Type": "string"},
{"Name": "year", "Type": "int"},
{"Name": "month", "Type": "int"},
{"Name": "day", "Type": "int"},
])
today = pendulum.now()
partitions = []
for i in range(1, 11):
partition_date = today.subtract(days=i)
year = partition_date.strftime("%Y")
month = partition_date.strftime("%m")
day = partition_date.strftime("%d")
partition_east = Partition(["us-east-1", year, month, day], f"s3://{self.bucket}/{self.table}/region=us-east-1/year={year}/month={month}/day={day}/")
partition_west = Partition(["us-west-2", year, month, day], f"s3://{self.bucket}/{self.table}/region=us-east-1/year={year}/month={month}/day={day}/")
self.helper.write_partition_to_s3(partition_east)
self.helper.write_partition_to_s3(partition_west)
partitions.append(partition_east)
partitions.append(partition_west)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk(limit_days=4, prefix_partitions=["us-east-1"])
found_partitions.should.have.length_of(4)
to_be_found = []
for i in range(1, 5):
partition_date = today.subtract(days=i)
year = partition_date.strftime("%Y")
month = partition_date.strftime("%m")
day = partition_date.strftime("%d")
to_be_found.append(Partition(["us-east-1", year, month, day], f"s3://{self.bucket}/{self.table}/region=us-east-1/year={year}/month={month}/day={day}/"))
set(found_partitions).should.equal(set(to_be_found))
@mock_glue
@mock_s3
def test_create_partitions(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partition = self.helper.create_partition_data()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
create_partitions_mock = MagicMock(return_value=[])
partitioner.glue.batch_create_partition = create_partitions_mock
partitioner.create_partitions([partition])
create_partitions_mock.assert_called_with(
DatabaseName=self.database,
TableName=self.table,
PartitionInputList=[partitioner._partition_input(partition)])
@mock_s3
@mock_glue
def test_create_partition_when_partition_exists(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partition = self.helper.create_partition_data()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
create_partitions_mock = MagicMock(return_value={
"Errors": [{
"PartitionValues": partition.values,
"ErrorDetail": {
"ErrorCode": "AlreadyExistsException",
"ErrorMessage": "Partition already exists"
}}]
})
partitioner.glue.batch_create_partition = create_partitions_mock
errors = partitioner.create_partitions([partition])
create_partitions_mock.assert_called_once()
errors.should.have.length_of(1)
errors[0]["PartitionValues"].should.equal(partition.values)
errors[0]["ErrorDetail"]["ErrorCode"].should.equal(
"AlreadyExistsException")
@mock_s3
@mock_glue
def test_create_partition_batches_by_one_hundred(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitions = sorted(self.helper.create_many_partitions(count=150))
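        # Glue's BatchCreatePartition accepts at most 100 partitions per
        # call, so 150 partitions should be split into two batches.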
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
create_partitions_mock = MagicMock(return_value=[])
partitioner.glue.batch_create_partition = create_partitions_mock
partitioner.create_partitions(partitions)
first_list = [partitioner._partition_input(p) for p in partitions[:100]]
second_list = [partitioner._partition_input(p) for p in partitions[100:]]
calls = [
call(
DatabaseName=self.database,
TableName=self.table,
PartitionInputList=first_list),
call(
DatabaseName=self.database,
TableName=self.table,
PartitionInputList=second_list),
]
create_partitions_mock.call_count.should.equal(2)
create_partitions_mock.assert_has_calls(calls)
@mock_s3
@mock_glue
def test_create_partition_already_exists_in_multiple_batches(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitions = sorted(self.helper.create_many_partitions(count=150))
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
# prime partitions list with two partitions, one in each group
already_exists = [partitions[5], partitions[115]]
errors = partitioner.create_partitions(already_exists)
errors.should.be.empty
# now attempt to create them as part of a large batch
errors = partitioner.create_partitions(partitions)
errors.should.have.length_of(2)
for idx, error in enumerate(errors):
partition = already_exists[idx]
error["PartitionValues"].should.equal(partition.values)
error["ErrorDetail"]["ErrorCode"].should.equal("AlreadyExistsException")
@mock_s3
@mock_glue
def test_create_partitions_on_disk_with_bad_table_location(self):
self.s3.create_bucket(Bucket=self.bucket)
database_input = self.helper.create_database_input()
self.glue.create_database(**database_input)
# no trailing slash for location is on purpose and what this
# test is checking against
table_input = self.helper.create_table_input(location=f"s3://{self.bucket}/{self.table}")
self.glue.create_table(**table_input)
partition = self.helper.create_partition_data()
subpath = "/".join(partition.values)
full_location = f"s3://{self.bucket}/{self.table}/{subpath}/"
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk()
found_partitions.should.have.length_of(1)
found_partitions[0].location.should.equal(full_location)
@mock_s3
@mock_glue
def test_find_partitions_in_glue_catalog(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partition = self.helper.create_partition_data()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partitioner.create_partitions([partition])
existing_partitions = partitioner.existing_partitions()
existing_partitions.should.have.length_of(1)
existing_partitions[0].values.should.equal(partition.values)
existing_partitions[0].location.should.equal(partition.location)
@mock_s3
@mock_glue
def test_partitions_to_create(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
already_created = self.helper.create_many_partitions(count=10, write=True)
to_create = self.helper.create_many_partitions(count=3, write=True)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partitioner.create_partitions(already_created)
found = partitioner.partitions_on_disk()
wants_to_create = partitioner.partitions_to_create(found)
set(wants_to_create).should.equal(set(to_create))
@mock_s3
@mock_glue
def test_partitions_to_create_empty_input(self):
# this is to test the early exit
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
wants_to_create = partitioner.partitions_to_create([])
wants_to_create.should.have.length_of(0)
@mock_s3
@mock_glue
def test_delete_partitions(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
self.helper.create_partition_data()
partition = self.helper.create_partition_data()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partitioner.create_partitions([partition])
mock = MagicMock(return_value=[])
partitioner.glue.batch_delete_partition = mock
to_delete = partitioner.existing_partitions()
partitioner.delete_partitions(to_delete)
mock.assert_called_with(
DatabaseName=self.database,
TableName=self.table,
PartitionsToDelete=[{"Values": to_delete[0].values}]
)
@mock_s3
@mock_glue
def test_delete_partitions_in_groups_of_twenty_five(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitions = sorted(self.helper.create_many_partitions(count=30))
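        # Glue's BatchDeletePartition accepts at most 25 partitions per
        # call, so 30 partitions should be split into two batches.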
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partitioner.create_partitions(partitions)
mock = MagicMock(return_value=[])
partitioner.glue.batch_delete_partition = mock
existing_partitions = partitioner.existing_partitions()
partitioner.delete_partitions(existing_partitions)
first_list = [{"Values": p.values} for p in partitions[:25]]
second_list = [{"Values": p.values} for p in partitions[25:]]
calls = [
call(
DatabaseName=self.database,
TableName=self.table,
PartitionsToDelete=first_list),
call(
DatabaseName=self.database,
TableName=self.table,
PartitionsToDelete=second_list),
]
mock.call_count.should.equal(2)
mock.assert_has_calls(calls)
@mock_s3
@mock_glue
def test_delete_nonexistent_partition(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partition = Partition(["2019", "01", "02", "03"], f"s3://{self.bucket}/{self.table}/2019/01/02/03/")
result = partitioner.delete_partitions([partition])
result.should.have.length_of(1)
result[0]["PartitionValues"].should.equal(["2019", "01", "02", "03"])
result[0]["ErrorDetail"]["ErrorCode"].should.equal("EntityNotFoundException")
@mock_s3
@mock_glue
def test_bad_partitions(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partition = self.helper.create_partition_data()
self.glue.create_partition(
DatabaseName=self.database,
TableName=self.table,
PartitionInput={
"Values": partition.values,
"StorageDescriptor": {
"Location": "s3://not-a-bucket/who-cares/"
},
})
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
bad_partitions = partitioner.bad_partitions()
bad_partitions.should.have.length_of(1)
bad_partitions[0].values.should.equal(partition.values)
bad_partitions[0].location.should.equal("s3://not-a-bucket/who-cares/")
@mock_s3
@mock_glue
def test_missing_partitions(self):
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
self.glue.create_partition(
DatabaseName=self.database,
TableName=self.table,
PartitionInput={
"Values": ["2019", "01", "02", "03"],
"StorageDescriptor": {
"Location": "s3://not-a-bucket/who-cares/"
},
})
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
missing_partitions = partitioner.missing_partitions()
missing_partitions.should.have.length_of(1)
missing_partitions[0].values.should.equal(["2019", "01", "02", "03"])
@mock_s3
@mock_glue
def test_find_moved_partitions(self):
old_location = "s3://old-bucket/table/"
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitions = sorted(self.helper.create_many_partitions(count=15))
batch_input = []
for partition in partitions:
batch_input.append({
"Values": partition.values,
"StorageDescriptor": {
"Location": f"{old_location}/data/"
}
})
self.glue.batch_create_partition(
DatabaseName=self.database,
TableName=self.table,
PartitionInputList=batch_input)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
moved = partitioner.find_moved_partitions()
moved.should.have.length_of(len(partitions))
moved.sort()
partitions.sort()
for idx, partition in enumerate(partitions):
moved[idx].should.equal(partition)
@mock_s3
@mock_glue
def test_find_moved_partitions_with_missing_partitions(self):
old_location = "s3://old-bucket/table/"
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
self.glue.create_partition(
DatabaseName=self.database,
TableName=self.table,
PartitionInput={
"Values": ["2019", "01", "01", "01"],
"StorageDescriptor": {"Location": f"{old_location}/data/"}
})
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
mock = MagicMock()
partitioner.glue.update_partition = mock
updated = partitioner.find_moved_partitions()
updated.should.be.empty
@mock_s3
@mock_glue
def test_update_partition_locations(self):
old_location = "s3://old-bucket/table/"
self.s3.create_bucket(Bucket=self.bucket)
self.helper.make_database_and_table()
partitions = sorted(self.helper.create_many_partitions(count=15))
batch_input = []
calls = []
for partition in partitions:
batch_input.append({
"Values": partition.values,
"StorageDescriptor": {
"Location": f"{old_location}/data/"
}
})
calls.append(call(
DatabaseName=self.database,
TableName=self.table,
PartitionValueList=partition.values,
PartitionInput=ANY))
self.glue.batch_create_partition(
DatabaseName=self.database,
TableName=self.table,
PartitionInputList=batch_input)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
mock = MagicMock()
partitioner.glue.update_partition = mock
moved = partitioner.find_moved_partitions()
errors = partitioner.update_partition_locations(moved)
errors.should.be.empty
mock.assert_has_calls(calls, any_order=True)
@mock_glue
def test_update_partition_locations_with_non_existent_partition(self):
self.helper.make_database_and_table()
bad_partition = Partition(["2019", "01", "01", "01"], "s3://who/cares/")
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
mock = MagicMock()
partitioner.glue.update_partition = mock
errors = partitioner.update_partition_locations([bad_partition])
errors.should.have.length_of(1)
errors[0]["Partition"].should.equal(bad_partition.values)
mock.assert_not_called()
@mock_glue
def test_update_partition_locations_with_mix_of_good_and_bad(self):
self.helper.make_database_and_table()
good_old_location = "s3://old-bucket/table/data1/"
good_new_location = f"s3://{self.bucket}/{self.table}/2019-01-01-01/"
good_partition = Partition(["2019", "01", "01", "01"], good_old_location)
bad_partition = Partition(["2018", "02", "02", "02"], "s3://old-bucket/table/data2/")
self.glue.create_partition(
DatabaseName=self.database,
TableName=self.table,
PartitionInput={
"Values": good_partition.values,
"StorageDescriptor": {"Location": good_partition.location}
})
good_partition.location = good_new_location
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
mock = MagicMock()
partitioner.glue.update_partition = mock
errors = partitioner.update_partition_locations([bad_partition, good_partition])
mock.assert_called_with(
DatabaseName=self.database,
TableName=self.table,
PartitionValueList=good_partition.values,
PartitionInput={
"Values": good_partition.values,
"StorageDescriptor": {"Location": good_new_location}
})
errors.should.have.length_of(1)
errors[0]["Partition"].should.equal(bad_partition.values)
@mock_glue
@mock_s3
def test_find_partitions_simple_schema(self):
"""Partitioner.partitions_on_disk should work with AWSLogs-like tables"""
self.s3.create_bucket(Bucket=self.bucket)
db_input = self.helper.create_database_input()
self.glue.create_database(**db_input)
table_input = self.helper.create_table_input()
table_input["TableInput"]["PartitionKeys"] = [
{"Name": "dt", "Type": "string"},
]
self.glue.create_table(**table_input)
# create initial partition
prefix = table_input["TableInput"]["StorageDescriptor"]["Location"]
location = f"{prefix}/2019/01/02/"
s3_key = f"{location}object.json"
splits = s3_key[len("s3://"):].split("/", 1)
bucket = splits[0]
path = splits[1]
self.s3.put_object(
Body='{"foo": "bar"}',
Bucket=bucket,
Key=path,
)
partitions = [Partition(["2019-01-02"], location)]
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk()
set(found_partitions).should.equal(set(partitions))
@mock_glue
@mock_s3
def test_find_partitions_single_key(self):
"""Partitioner.partitions_on_disk should work with single-key tables, in hive-format"""
self.s3.create_bucket(Bucket=self.bucket)
db_input = self.helper.create_database_input()
self.glue.create_database(**db_input)
table_input = self.helper.create_table_input()
table_input["TableInput"]["PartitionKeys"] = [
{"Name": "dt", "Type": "string"},
]
self.glue.create_table(**table_input)
# create initial partition
prefix = table_input["TableInput"]["StorageDescriptor"]["Location"]
location = f"{prefix}/dt=2019-01-02/"
s3_key = f"{location}object.json"
splits = s3_key[len("s3://"):].split("/", 1)
bucket = splits[0]
path = splits[1]
self.s3.put_object(
Body='{"foo": "bar"}',
Bucket=bucket,
Key=path,
)
partitions = [Partition(["2019-01-02"], location)]
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk()
set(found_partitions).should.equal(set(partitions))
@mock_glue
@mock_s3
def test_find_partitions_with_limit_no_hour_partition(self):
"""Partitioner.partitions_on_disk, limit_days set,
on a table partitioned by day, should work"""
self.s3.create_bucket(Bucket=self.bucket)
db_input = self.helper.create_database_input()
self.glue.create_database(**db_input)
table_input = self.helper.create_table_input(location=f"s3://{self.bucket}/{self.table}/")
table_input["TableInput"]["PartitionKeys"] = [
{"Name": "year", "Type": "string"},
{"Name": "month", "Type": "string"},
{"Name": "day", "Type": "string"},
]
self.glue.create_table(**table_input)
today = pendulum.now()
partitions = []
for i in range(1, 11):
partition_date = today.subtract(days=i)
year = partition_date.strftime("%Y")
month = partition_date.strftime("%m")
day = partition_date.strftime("%d")
partition = Partition([year, month, day], f"s3://{self.bucket}/{self.table}/{year}/{month}/{day}/")
self.helper.write_partition_to_s3(partition)
partitions.append(partition)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
found_partitions = partitioner.partitions_on_disk(limit_days=4)
found_partitions.should.have.length_of(4)
set(found_partitions).should.equal(set(partitions[0:4]))
@mock_glue
@mock_s3
def test_find_partitions_with_limit_bad_partition_keys(self):
"""Partitioner.partitions_on_disk, limit_days set,
on a single-partition table raises an error"""
self.s3.create_bucket(Bucket=self.bucket)
db_input = self.helper.create_database_input()
self.glue.create_database(**db_input)
table_input = self.helper.create_table_input(location=f"s3://{self.bucket}/{self.table}/")
table_input["TableInput"]["PartitionKeys"] = [
{"Name": "dt", "Type": "string"},
]
self.glue.create_table(**table_input)
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partitioner.partitions_on_disk.when.called_with(limit_days=4).should.have.raised(TypeError)
@mock_glue
def test_update_partition_storage_descriptors(self):
"""Partitioner.update_storage_descriptors() updates the storage descriptors of all partitions"""
self.helper.make_database_and_table()
partitioner = Partitioner(self.database, self.table, aws_region=self.region)
partitioner.create_partitions(self.helper.create_many_partitions(write=False))
# get and update table
columns = [
{"Name": "foo", "Type": "string"},
{"Name": "bar", "Type": "string"},
{"Name": "only-in-this-test", "Type": "string"},
]
table = partitioner.glue.get_table(DatabaseName=self.database, Name=self.table)["Table"]
for key in ["DatabaseName", "CreateTime", "CreatedBy", "IsRegisteredWithLakeFormation", "CatalogId"]:
if key in table:
del table[key]
table["StorageDescriptor"]["Columns"] = columns
partitioner.glue.update_table(DatabaseName=self.database, TableInput=table)
errors = partitioner.update_partition_storage_descriptors()
errors.should.have.length_of(0)
for partition in partitioner.existing_partitions():
partition.raw["StorageDescriptor"]["Columns"].should.equal(columns)
class PartitionTest(TestCase):
def test_partition_comparisons(self):
p1 = Partition(["2019", "01", "01", "01"], "s3://bucket/table/")
p2 = Partition(["2019", "02", "02", "02"], "s3://bucket/table2/")
(p1 > p2).should.be.false
(p1 < p2).should.be.true
p3 = Partition(["2019", "01", "01", "01"], "s3://bucket/table/")
(p1 == p3).should.be.true
p1._cmp(p3).should.equal(0)
p4 = Partition(["2019", "01", "01", "01"], "s3://bucket/z-table/")
(p1 > p4).should.be.true
(p4 > p1).should.be.false
def test_parse_from_aws(self):
normal_aws_response = {
"Values": ["2019", "01", "02", "03"],
"StorageDescriptor": {
"Location": "s3://bucket/location/2019/01/02/03/",
},
}
partition = Partition.from_aws_response(normal_aws_response)
partition.values[0].should.equal("2019")
partition.values[1].should.equal("01")
partition.values[2].should.equal("02")
partition.values[3].should.equal("03")
partition.location.should.equal("s3://bucket/location/2019/01/02/03/")
        # Confirm location gets normalized by Partition
bad_location_aws_response = {
"Values": ["2019", "01", "02", "03"],
"StorageDescriptor": {
"Location": "s3://bucket/location/2019/01/02/03",
},
}
partition2 = Partition.from_aws_response(bad_location_aws_response)
partition2.values[0].should.equal("2019")
partition2.values[1].should.equal("01")
partition2.values[2].should.equal("02")
partition2.values[3].should.equal("03")
partition2.location.should.equal("s3://bucket/location/2019/01/02/03/")
partition2.should.equal(partition)
``` |
{
"source": "journeyfan/toontown-journey",
"score": 3
} |
#### File: otp/chat/WhiteList.py
```python
from bisect import bisect_left
class WhiteList:
def __init__(self, words):
self.words = words
self.numWords = len(self.words)
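        # NOTE: `words` is assumed to be sorted; the prefix queries below
        # rely on bisect_left binary searches over this list.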
def cleanText(self, text):
text = text.strip('.,?!')
return text.lower()
def isWord(self, text):
return self.cleanText(text) in self.words
def isPrefix(self, text):
text = self.cleanText(text)
i = bisect_left(self.words, text)
if i == self.numWords:
return False
return self.words[i].startswith(text)
def prefixCount(self, text):
text = self.cleanText(text)
i = bisect_left(self.words, text)
j = i
while j < self.numWords and self.words[j].startswith(text):
j += 1
return j - i
def prefixList(self, text):
text = self.cleanText(text)
i = bisect_left(self.words, text)
j = i
while j < self.numWords and self.words[j].startswith(text):
j += 1
return self.words[i:j]
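# Illustrative usage (hypothetical word list; input must be pre-sorted):
#   wl = WhiteList(['apple', 'applesauce', 'banana'])
#   wl.isWord('Apple!')    # True -- cleanText() strips '.,?!' and lowercases
#   wl.isPrefix('app')     # True
#   wl.prefixCount('app')  # 2
#   wl.prefixList('app')   # ['apple', 'applesauce']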
```
#### File: otp/speedchat/SCCustomMenu.py
```python
from .SCCustomTerminal import SCCustomTerminal
from .SCMenu import SCMenu
from otp.otpbase.OTPLocalizer import CustomSCStrings
class SCCustomMenu(SCMenu):
def __init__(self):
SCMenu.__init__(self)
self.accept('customMessagesChanged', self.__customMessagesChanged)
self.__customMessagesChanged()
def __customMessagesChanged(self):
self.clearMenu()
try:
lt = base.localAvatar
except:
return
for msgIndex in lt.customMessages:
if msgIndex in CustomSCStrings:
self.append(SCCustomTerminal(msgIndex))
```
#### File: toontown/chat/TTChatInputWhiteList.py
```python
from otp.chat.ChatInputWhiteListFrame import ChatInputWhiteListFrame
from toontown.chat.TTWhiteList import TTWhiteList
from direct.showbase import DirectObject
from otp.otpbase import OTPGlobals
import sys
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from otp.otpbase import OTPLocalizer
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
class TTChatInputWhiteList(ChatInputWhiteListFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('TTChatInputWhiteList')
TFToggleKey = base.config.GetString('true-friend-toggle-key', 'alt')
TFToggleKeyUp = TFToggleKey + '-up'
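    # Holding the toggle key (configurable, default 'alt') while chatting
    # whispers the message to online true friends instead of speaking openly;
    # see shiftPressed/shiftReleased and sendChatByData below.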
def __init__(self, parent = None, **kw):
entryOptions = {'parent': self,
'relief': DGG.SUNKEN,
'scale': 0.035,
'frameColor': (0.9, 0.9, 0.85, 0.0),
'pos': (-0.2, 0, 0.11),
'entryFont': OTPGlobals.getInterfaceFont(),
'width': 8.6,
'numLines': 5,
'cursorKeys': 0,
'backgroundFocus': 0,
'suppressKeys': 0,
'suppressMouse': 1,
'command': self.sendChat,
'failedCommand': self.sendFailed,
'focus': 0,
'text': '',
'sortOrder': DGG.FOREGROUND_SORT_INDEX}
ChatInputWhiteListFrame.__init__(self, entryOptions, parent, **kw)
self.whiteList = TTWhiteList()
base.whiteList = self.whiteList
base.ttwl = self
self.autoOff = 1
self.sendBy = 'Data'
self.prefilter = 0
self.promoteWhiteList = 1
self.typeGrabbed = 0
self.deactivate()
gui = loader.loadModel('phase_3.5/models/gui/chat_input_gui')
self.chatFrame = DirectFrame(parent=self, image=gui.find('**/Chat_Bx_FNL'), relief=None, pos=(0.0, 0, 0.0), state=DGG.NORMAL)
self.chatButton = DirectButton(parent=self.chatFrame, image=(gui.find('**/ChtBx_ChtBtn_UP'), gui.find('**/ChtBx_ChtBtn_DN'), gui.find('**/ChtBx_ChtBtn_RLVR')), pos=(0.182, 0, -0.088), relief=None, text=('', OTPLocalizer.ChatInputNormalSayIt, OTPLocalizer.ChatInputNormalSayIt), text_scale=0.06, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -0.09), textMayChange=0, command=self.chatButtonPressed)
self.cancelButton = DirectButton(parent=self.chatFrame, image=(gui.find('**/CloseBtn_UP'), gui.find('**/CloseBtn_DN'), gui.find('**/CloseBtn_Rllvr')), pos=(-0.151, 0, -0.088), relief=None, text=('', OTPLocalizer.ChatInputNormalCancel, OTPLocalizer.ChatInputNormalCancel), text_scale=0.06, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -0.09), textMayChange=0, command=self.cancelButtonPressed)
        self.whisperLabel = DirectLabel(parent=self.chatFrame, pos=(0.02, 0, 0.23), relief=DGG.FLAT, frameColor=(1, 1, 0.5, 1), frameSize=(-0.23, 0.23, -0.07, 0.05), text=OTPLocalizer.ChatInputNormalWhisper, text_scale=0.04, text_fg=Vec4(0, 0, 0, 1), text_wordwrap=9.5, textMayChange=1)
self.chatEntry.bind(DGG.OVERFLOW, self.chatOverflow)
self.chatEntry.bind(DGG.TYPE, self.typeCallback)
self.trueFriendChat = 0
if base.config.GetBool('whisper-to-nearby-true-friends', 1):
self.accept(self.TFToggleKey, self.shiftPressed)
return
def shiftPressed(self):
self.ignore(self.TFToggleKey)
self.trueFriendChat = 1
self.accept(self.TFToggleKeyUp, self.shiftReleased)
def shiftReleased(self):
self.ignore(self.TFToggleKeyUp)
self.trueFriendChat = 0
self.accept(self.TFToggleKey, self.shiftPressed)
def handleTypeGrab(self):
self.ignore('typeEntryGrab')
self.accept('typeEntryRelease', self.handleTypeRelease)
self.typeGrabbed = 1
def handleTypeRelease(self):
self.ignore('typeEntryRelease')
self.accept('typeEntryGrab', self.handleTypeGrab)
self.typeGrabbed = 0
def typeCallback(self, extraArgs):
try:
if self.typeGrabbed:
return
self.applyFilter(extraArgs)
if localAvatar.chatMgr.chatInputWhiteList.isActive():
return
else:
messenger.send('wakeup')
messenger.send('enterNormalChat')
except UnicodeDecodeError:
return
def destroy(self):
self.chatEntry.destroy()
self.chatFrame.destroy()
self.ignoreAll()
ChatInputWhiteListFrame.destroy(self)
def delete(self):
base.whiteList = None
ChatInputWhiteListFrame.delete(self)
return
def sendChat(self, text, overflow = False):
if self.typeGrabbed:
return
else:
ChatInputWhiteListFrame.sendChat(self, self.chatEntry.get())
def sendChatByData(self, text):
if self.trueFriendChat:
for friendId, flags in base.localAvatar.friendsList:
if flags & ToontownGlobals.FriendChat:
self.sendWhisperByFriend(friendId, text)
elif not self.receiverId:
base.talkAssistant.sendOpenTalk(text)
elif self.receiverId and not self.toPlayer:
base.talkAssistant.sendWhisperTalk(text, self.receiverId)
elif self.receiverId and self.toPlayer:
base.talkAssistant.sendAccountTalk(text, self.receiverId)
def sendWhisperByFriend(self, avatarId, text):
online = 0
if avatarId in base.cr.doId2do:
online = 1
avatarUnderstandable = 0
av = None
if avatarId:
av = base.cr.identifyAvatar(avatarId)
if av != None:
avatarUnderstandable = av.isUnderstandable()
if avatarUnderstandable and online:
base.talkAssistant.sendWhisperTalk(text, avatarId)
return
def chatButtonPressed(self):
if self.okayToSubmit:
self.sendChat(self.chatEntry.get())
else:
self.sendFailed(self.chatEntry.get())
def cancelButtonPressed(self):
self.requestMode('Off')
localAvatar.chatMgr.fsm.request('mainMenu')
def enterAllChat(self):
ChatInputWhiteListFrame.enterAllChat(self)
self.whisperLabel.hide()
def exitAllChat(self):
ChatInputWhiteListFrame.exitAllChat(self)
def enterPlayerWhisper(self):
ChatInputWhiteListFrame.enterPlayerWhisper(self)
self.labelWhisper()
def exitPlayerWhisper(self):
ChatInputWhiteListFrame.exitPlayerWhisper(self)
self.whisperLabel.hide()
def enterAvatarWhisper(self):
ChatInputWhiteListFrame.enterAvatarWhisper(self)
self.labelWhisper()
def exitAvatarWhisper(self):
ChatInputWhiteListFrame.exitAvatarWhisper(self)
self.whisperLabel.hide()
def labelWhisper(self):
if self.receiverId:
self.whisperName = base.talkAssistant.findName(self.receiverId, self.toPlayer)
self.whisperLabel['text'] = OTPLocalizer.ChatInputWhisperLabel % self.whisperName
self.whisperLabel.show()
else:
self.whisperLabel.hide()
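    # applyFilter marks words that fail the whitelist with text-property
    # escapes: '\x01WLDisplay\x01...\x02' when the Toon has at least one true
    # friend, '\x01WLEnter\x01...\x02' otherwise. The word still being typed
    # is only prefix-checked unless strict is set, and '~' commands skip
    # filtering entirely.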
def applyFilter(self, keyArgs, strict = False):
text = self.chatEntry.get(plain=True)
if text.startswith('~'):
self.okayToSubmit = True
else:
words = text.split(' ')
newwords = []
self.okayToSubmit = True
flag = 0
for friendId, flags in base.localAvatar.friendsList:
if flags & ToontownGlobals.FriendChat:
flag = 1
for word in words:
if word == '' or self.whiteList.isWord(word) or not base.cr.whiteListChatEnabled:
newwords.append(word)
else:
if self.checkBeforeSend:
self.okayToSubmit = False
else:
self.okayToSubmit = True
if flag:
newwords.append('\x01WLDisplay\x01' + word + '\x02')
else:
newwords.append('\x01WLEnter\x01' + word + '\x02')
if not strict:
lastword = words[-1]
try:
if lastword == '' or self.whiteList.isPrefix(lastword) or not base.cr.whiteListChatEnabled:
newwords[-1] = lastword
elif flag:
newwords[-1] = '\x01WLDisplay\x01' + lastword + '\x02'
else:
newwords[-1] = '\x01WLEnter\x01' + lastword + '\x02'
except UnicodeDecodeError:
self.okayToSubmit = False
newtext = ' '.join(newwords)
self.chatEntry.set(newtext)
self.chatEntry.guiItem.setAcceptEnabled(self.okayToSubmit)
```
#### File: toontown/classicchars/DistributedJailbirdDale.py
```python
from direct.showbase.ShowBaseGlobal import *
from . import DistributedCCharBase
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
from . import CharStateDatas
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from . import DistributedDale
class DistributedJailbirdDale(DistributedDale.DistributedDale):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedJailbirdDale')
def __init__(self, cr):
try:
self.DistributedDale_initialized
except:
self.DistributedDale_initialized = 1
DistributedCCharBase.DistributedCCharBase.__init__(self, cr, TTLocalizer.JailbirdDale, 'jda')
self.fsm = ClassicFSM.ClassicFSM(self.getName(), [State.State('Off', self.enterOff, self.exitOff, ['Neutral']), State.State('Neutral', self.enterNeutral, self.exitNeutral, ['Walk']), State.State('Walk', self.enterWalk, self.exitWalk, ['Neutral'])], 'Off', 'Off')
self.fsm.enterInitialState()
self.handleHolidays()
self.nametag.setText(TTLocalizer.Dale)
```
#### File: toontown/coghq/CogHQLoader.py
```python
from . import CogHQLobby
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import StateData
from pandac.PandaModules import *
from toontown.hood import QuietZoneState
from toontown.hood import ZoneUtil
from toontown.suit import Suit
from toontown.town import TownBattle
class CogHQLoader(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('CogHQLoader')
def __init__(self, hood, parentFSMState, doneEvent):
StateData.StateData.__init__(self, doneEvent)
self.hood = hood
self.parentFSMState = parentFSMState
self.placeDoneEvent = 'cogHQLoaderPlaceDone'
self.townBattleDoneEvent = 'town-battle-done'
self.fsm = ClassicFSM.ClassicFSM('CogHQLoader', [State.State('start', None, None, ['quietZone', 'cogHQExterior', 'cogHQBossBattle']),
State.State('cogHQExterior', self.enterCogHQExterior, self.exitCogHQExterior, ['quietZone', 'cogHQLobby']),
State.State('cogHQLobby', self.enterCogHQLobby, self.exitCogHQLobby, ['quietZone', 'cogHQExterior', 'cogHQBossBattle']),
State.State('cogHQBossBattle', self.enterCogHQBossBattle, self.exitCogHQBossBattle, ['quietZone']),
State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['cogHQExterior', 'cogHQLobby', 'cogHQBossBattle']),
State.State('final', None, None, ['start'])], 'start', 'final')
return
def load(self, zoneId):
self.parentFSMState.addChild(self.fsm)
self.music = base.loader.loadMusic(self.musicFile)
self.battleMusic = base.loader.loadMusic('phase_9/audio/bgm/encntr_suit_winning.ogg')
self.townBattle = TownBattle.TownBattle(self.townBattleDoneEvent)
self.townBattle.load()
Suit.loadSuits(3)
self.loadPlaceGeom(zoneId)
def loadPlaceGeom(self, zoneId):
pass
def unloadPlaceGeom(self):
pass
def unload(self):
self.unloadPlaceGeom()
self.parentFSMState.removeChild(self.fsm)
del self.parentFSMState
del self.fsm
self.townBattle.unload()
self.townBattle.cleanup()
del self.townBattle
del self.battleMusic
Suit.unloadSuits(3)
Suit.unloadSkelDialog()
del self.hood
ModelPool.garbageCollect()
TexturePool.garbageCollect()
def enter(self, requestStatus):
self.fsm.enterInitialState()
self.fsm.request(requestStatus['where'], [requestStatus])
def exit(self):
self.ignoreAll()
def enterQuietZone(self, requestStatus):
self.quietZoneDoneEvent = uniqueName('quietZoneDone')
self.acceptOnce(self.quietZoneDoneEvent, self.handleQuietZoneDone)
self.quietZoneStateData = QuietZoneState.QuietZoneState(self.quietZoneDoneEvent)
self.quietZoneStateData.load()
self.quietZoneStateData.enter(requestStatus)
def exitQuietZone(self):
self.ignore(self.quietZoneDoneEvent)
del self.quietZoneDoneEvent
self.quietZoneStateData.exit()
self.quietZoneStateData.unload()
self.quietZoneStateData = None
return
def handleQuietZoneDone(self):
status = self.quietZoneStateData.getRequestStatus()
self.fsm.request(status['where'], [status])
def enterPlace(self, requestStatus):
self.acceptOnce(self.placeDoneEvent, self.placeDone)
self.place = self.placeClass(self, self.fsm, self.placeDoneEvent)
base.cr.playGame.setPlace(self.place)
self.place.load()
self.place.enter(requestStatus)
def exitPlace(self):
self.ignore(self.placeDoneEvent)
self.place.exit()
self.place.unload()
self.place = None
base.cr.playGame.setPlace(self.place)
return
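    # placeDone routing: if the Toon stays in this HQ, swap the place geometry
    # and pass through the quiet zone; otherwise report back to the hood via
    # doneEvent (e.g. when teleporting to another hood or shard).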
def placeDone(self):
self.requestStatus = self.place.doneStatus
status = self.place.doneStatus
if status.get('shardId') == None and self.isInThisHq(status):
self.unloadPlaceGeom()
zoneId = status['zoneId']
self.loadPlaceGeom(zoneId)
self.fsm.request('quietZone', [status])
else:
self.doneStatus = status
messenger.send(self.doneEvent)
return
def isInThisHq(self, status):
if ZoneUtil.isDynamicZone(status['zoneId']):
return status['hoodId'] == self.hood.hoodId
else:
return ZoneUtil.getHoodId(status['zoneId']) == self.hood.hoodId
def enterCogHQExterior(self, requestStatus):
self.placeClass = self.getExteriorPlaceClass()
self.enterPlace(requestStatus)
self.hood.spawnTitleText(requestStatus['zoneId'])
def exitCogHQExterior(self):
taskMgr.remove('titleText')
self.hood.hideTitleText()
self.exitPlace()
self.placeClass = None
return
def enterCogHQLobby(self, requestStatus):
self.placeClass = CogHQLobby.CogHQLobby
self.enterPlace(requestStatus)
self.hood.spawnTitleText(requestStatus['zoneId'])
def exitCogHQLobby(self):
taskMgr.remove('titleText')
self.hood.hideTitleText()
self.exitPlace()
self.placeClass = None
return
def enterCogHQBossBattle(self, requestStatus):
self.placeClass = self.getBossPlaceClass()
self.enterPlace(requestStatus)
def exitCogHQBossBattle(self):
self.exitPlace()
self.placeClass = None
return
```
#### File: toontown/coghq/DistributedCrusherEntityAI.py
```python
from otp.level import DistributedEntityAI
from direct.directnotify import DirectNotifyGlobal
class DistributedCrusherEntityAI(DistributedEntityAI.DistributedEntityAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCrusherEntityAI')
def __init__(self, level, entId):
self.isCrusher = 0
self.crushCell = None
DistributedEntityAI.DistributedEntityAI.__init__(self, level, entId)
self.crushMsg = self.getUniqueName('crusherDoCrush')
def generate(self):
DistributedEntityAI.DistributedEntityAI.generate(self)
self.setActiveCrushCell()
def delete(self):
self.ignoreAll()
DistributedEntityAI.DistributedEntityAI.delete(self)
def destroy(self):
self.notify.info('destroy entity %s' % self.entId)
if self.crushCell != None:
self.crushCell.unregisterCrusher(self.entId)
self.crushCell = None
DistributedEntityAI.DistributedEntityAI.destroy(self)
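    # The crush cell entity may not exist yet when this entity generates; if
    # the lookup below misses, we listen for the cell's create event and call
    # setActiveCrushCell again once it appears.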
def setActiveCrushCell(self):
self.notify.debug('setActiveCrushCell, entId: %d' % self.entId)
if self.crushCellId != None:
self.crushCell = self.level.entities.get(self.crushCellId, None)
if self.crushCell == None:
self.accept(self.level.getEntityCreateEvent(self.crushCellId), self.setActiveCrushCell)
else:
self.isCrusher = 1
self.crushCell.registerCrusher(self.entId)
def sendCrushMsg(self, axis = 0):
if self.isCrusher:
            messenger.send(self.crushMsg, [self.entId, axis])
def getPosition(self):
if hasattr(self, 'pos'):
return self.pos
```
#### File: toontown/coghq/LawbotHQExterior.py
```python
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from pandac.PandaModules import *
from toontown.battle import BattlePlace
from toontown.building import Elevator
from toontown.coghq import CogHQExterior
from toontown.dna.DNAParser import loadDNAFileAI
from libpandadna import DNAStorage
from toontown.hood import ZoneUtil
from toontown.toonbase import ToontownGlobals
class LawbotHQExterior(CogHQExterior.CogHQExterior):
notify = DirectNotifyGlobal.directNotify.newCategory('LawbotHQExterior')
def enter(self, requestStatus):
CogHQExterior.CogHQExterior.enter(self, requestStatus)
# Load the CogHQ DNA file:
dnaStore = DNAStorage()
dnaFileName = self.genDNAFileName(self.zoneId)
loadDNAFileAI(dnaStore, dnaFileName)
# Collect all of the vis group zone IDs:
self.zoneVisDict = {}
for i in range(dnaStore.getNumDNAVisGroupsAI()):
groupFullName = dnaStore.getDNAVisGroupName(i)
visGroup = dnaStore.getDNAVisGroupAI(i)
visZoneId = int(base.cr.hoodMgr.extractGroupName(groupFullName))
visZoneId = ZoneUtil.getTrueZoneId(visZoneId, self.zoneId)
visibles = []
            for j in range(visGroup.getNumVisibles()):
                visibles.append(int(visGroup.getVisible(j)))
visibles.append(ZoneUtil.getBranchZone(visZoneId))
self.zoneVisDict[visZoneId] = visibles
# Next, we want interest in all vis groups due to this being a Cog HQ:
base.cr.sendSetZoneMsg(self.zoneId, list(self.zoneVisDict.values())[0])
```
#### File: toontown/distributed/ToontownDistrictAI.py
```python
from direct.directnotify.DirectNotifyGlobal import directNotify
import time
from otp.distributed.DistributedDistrictAI import DistributedDistrictAI
class ToontownDistrictAI(DistributedDistrictAI):
notify = directNotify.newCategory('ToontownDistrictAI')
created = 0
ahnnLog = 0
def announceGenerate(self):
DistributedDistrictAI.announceGenerate(self)
        # Remember the time at which this district was created:
self.created = int(time.time())
# We want to handle shard status queries so that a ShardStatusReceiver
# being created after we're generated will know where we're at:
self.air.netMessenger.accept('queryShardStatus', self, self.handleShardStatusQuery)
# Send a shard status update with the information we have:
        status = {
            'available': bool(self.available),
            'name': self.name,
            'created': self.created
        }
self.air.netMessenger.send('shardStatus', [self.air.ourChannel, status])
# Add a post remove shard status update in-case we go down:
status = {'available': False}
datagram = self.air.netMessenger.prepare('shardStatus', [self.air.ourChannel, status])
self.air.addPostRemove(datagram)
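        # The post-remove datagram is held by the Message Director and only
        # delivered if this AI's connection drops, so other servers still see
        # the shard flagged unavailable after an unclean shutdown.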
def handleShardStatusQuery(self):
# Send a shard status update with the information we have:
status = {
'available': bool(self.available),
'name': self.name,
'created': int(time.time())
}
self.air.netMessenger.send('shardStatus', [self.air.ourChannel, status])
def allowAHNNLog(self, ahnnLog):
self.ahnnLog = ahnnLog
def d_allowAHNNLog(self, ahnnLog):
self.sendUpdate('allowAHNNLog', [ahnnLog])
def b_allowAHNNLog(self, ahnnLog):
self.allowAHNNLog(ahnnLog)
self.d_allowAHNNLog(ahnnLog)
def getAllowAHNNLog(self):
return self.ahnnLog
def setName(self, name):
DistributedDistrictAI.setName(self, name)
# Send a shard status update containing our name:
status = {'name': name}
self.air.netMessenger.send('shardStatus', [self.air.ourChannel, status])
def setAvailable(self, available):
DistributedDistrictAI.setAvailable(self, available)
# Send a shard status update containing our availability:
status = {'available': bool(available)}
self.air.netMessenger.send('shardStatus', [self.air.ourChannel, status])
```
#### File: toontown/estate/BankGUI.py
```python
from direct.directnotify import DirectNotifyGlobal
from direct.gui.DirectGui import *
from direct.task.Task import Task
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
class BankGUI(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('BankGUI')
def __init__(self, doneEvent, allowWithdraw = 1):
backGeom = loader.loadModel('phase_3/models/gui/tt_m_gui_ups_panelBg')
DirectFrame.__init__(self, relief=None, geom=backGeom, geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1.66, 1, 1.4), pos=(0, 0, 0))
self.initialiseoptions(BankGUI)
self.doneEvent = doneEvent
self.__transactionAmount = 0
bankGuiModel = loader.loadModel('phase_3/models/gui/bank_GUI')
jarGui = bankGuiModel.find('**/jar')
arrowGui = loader.loadModel('phase_3/models/gui/create_a_toon_gui')
bankModel = bankGuiModel.find('**/vault')
okImageList = (
bankGuiModel.find('**/Ok_UP'),
bankGuiModel.find('**/Ok_DN'),
bankGuiModel.find('**/Ok_RLVR')
)
cancelImageList = (
bankGuiModel.find('**/Cancel_UP'),
bankGuiModel.find('**/Cancel_DN'),
bankGuiModel.find('**/Cancel_RLVR')
)
arrowImageList = (
arrowGui.find('**/CrtATn_R_Arrow_UP'),
arrowGui.find('**/CrtATn_R_Arrow_DN'),
arrowGui.find('**/CrtATn_R_Arrow_RLVR'),
arrowGui.find('**/CrtATn_R_Arrow_UP')
)
self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, pos=(-0.2, 0, -0.4), text=TTLocalizer.BankGuiCancel, text_scale=0.06, text_pos=(0, -0.1), image_scale=0.6, command=self.__cancel)
self.okButton = DirectButton(parent=self, relief=None, image=okImageList, pos=(0.2, 0, -0.4), text=TTLocalizer.BankGuiOk, text_scale=0.06, text_pos=(0, -0.1), image_scale=0.6, command=self.__requestTransaction)
self.jarDisplay = DirectLabel(parent=self, relief=None, pos=(-0.4, 0, 0), scale=0.7, text=str(base.localAvatar.getMoney()), text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0, -0.1, 0), image=jarGui, text_font=ToontownGlobals.getSignFont())
self.bankDisplay = DirectLabel(parent=self, relief=None, pos=(0.4, 0, 0), scale=0.9, text=str(base.localAvatar.getBankMoney()), text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0, -0.1, 0), image=bankModel, image_pos=(0.025, 0, 0),image_scale=0.8, text_font=ToontownGlobals.getSignFont())
self.depositArrow = DirectButton(parent=self, relief=None, image=arrowImageList, image_scale=(1, 1, 1), image3_color=Vec4(0.6, 0.6, 0.6, 0.25), pos=(0.01, 0, 0.15))
self.withdrawArrow = DirectButton(parent=self, relief=None, image=arrowImageList, image_scale=(-1, 1, 1), image3_color=Vec4(0.6, 0.6, 0.6, 0.25), pos=(-0.01, 0, -0.15))
self.depositArrow.bind(DGG.B1PRESS, self.__depositButtonDown)
self.depositArrow.bind(DGG.B1RELEASE, self.__depositButtonUp)
self.withdrawArrow.bind(DGG.B1PRESS, self.__withdrawButtonDown)
self.withdrawArrow.bind(DGG.B1RELEASE, self.__withdrawButtonUp)
self.accept('bankAsleep', self.__cancel)
self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
self.accept(localAvatar.uniqueName('bankMoneyChange'), self.__bankMoneyChange)
if allowWithdraw:
self.depositArrow.setPos(0.01, 0, 0.15)
self.withdrawArrow.setPos(-0.01, 0, -0.15)
else:
self.depositArrow.setPos(0, 0, 0)
self.withdrawArrow.hide()
jarGui.removeNode()
arrowGui.removeNode()
bankGuiModel.removeNode()
self.__updateTransaction(0)
def destroy(self):
taskMgr.remove(self.taskName('runCounter'))
self.ignore(localAvatar.uniqueName('moneyChange'))
self.ignore(localAvatar.uniqueName('bankMoneyChange'))
DirectFrame.destroy(self)
def __cancel(self):
messenger.send(self.doneEvent, [0])
def __requestTransaction(self):
self.ignore(localAvatar.uniqueName('moneyChange'))
self.ignore(localAvatar.uniqueName('bankMoneyChange'))
messenger.send(self.doneEvent, [self.__transactionAmount])
def __updateTransaction(self, amount):
hitLimit = 0
self.__transactionAmount += amount
jarMoney = base.localAvatar.getMoney()
maxJarMoney = base.localAvatar.getMaxMoney()
bankMoney = base.localAvatar.getBankMoney()
maxBankMoney = ToontownGlobals.MaxBankMoney
self.__transactionAmount = min(self.__transactionAmount, jarMoney)
self.__transactionAmount = min(self.__transactionAmount, maxBankMoney - bankMoney)
self.__transactionAmount = -min(-self.__transactionAmount, maxJarMoney - jarMoney)
self.__transactionAmount = -min(-self.__transactionAmount, bankMoney)
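        # The four clamps above bound the pending transaction: a deposit can
        # never exceed the jar contents or the remaining bank space, and a
        # withdrawal can never exceed the bank contents or the remaining jar
        # space (note that -min(-x, y) is max(x, -y)).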
newJarMoney = jarMoney - self.__transactionAmount
newBankMoney = bankMoney + self.__transactionAmount
if newJarMoney <= 0 or newBankMoney >= maxBankMoney:
self.depositArrow['state'] = DGG.DISABLED
hitLimit = 1
else:
self.depositArrow['state'] = DGG.NORMAL
if newBankMoney <= 0 or newJarMoney >= maxJarMoney:
self.withdrawArrow['state'] = DGG.DISABLED
hitLimit = 1
else:
self.withdrawArrow['state'] = DGG.NORMAL
self.jarDisplay['text'] = str(newJarMoney)
self.bankDisplay['text'] = str(newBankMoney)
return (hitLimit, newJarMoney, newBankMoney, self.__transactionAmount)
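    # __runCounter accelerates while an arrow button is held: the delay
    # between ticks halves each tick, and once it drops below 5 ms the
    # per-tick amount also grows by 10% per tick, until a limit is hit.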
def __runCounter(self, task):
if task.time - task.prevTime < task.delayTime:
return Task.cont
task.delayTime /= 2
task.prevTime = task.time
if task.delayTime < 0.005:
task.amount *= 1.1
hitLimit = self.__updateTransaction(int(task.amount))[0]
if hitLimit:
return Task.done
return Task.cont
def __depositButtonUp(self, event):
messenger.send('wakeup')
taskMgr.remove(self.taskName('runCounter'))
def __depositButtonDown(self, event):
messenger.send('wakeup')
task = Task(self.__runCounter)
task.delayTime = 0.2
task.prevTime = 0.0
task.amount = 1.0
hitLimit = self.__updateTransaction(int(task.amount))[0]
if not hitLimit:
taskMgr.add(task, self.taskName('runCounter'))
def __withdrawButtonUp(self, event):
messenger.send('wakeup')
taskMgr.remove(self.taskName('runCounter'))
def __withdrawButtonDown(self, event):
messenger.send('wakeup')
task = Task(self.__runCounter)
task.delayTime = 0.2
task.prevTime = 0.0
task.amount = -1.0
hitLimit = self.__updateTransaction(int(task.amount))[0]
if not hitLimit:
taskMgr.add(task, self.taskName('runCounter'))
def __moneyChange(self, money):
self.__updateTransaction(0)
def __bankMoneyChange(self, bankMoney):
self.__updateTransaction(0)
```
#### File: toontown/hood/DDHood.py
```python
from direct.directnotify.DirectNotifyGlobal import directNotify
from pandac.PandaModules import Vec4, Fog
from toontown.safezone.DDSafeZoneLoader import DDSafeZoneLoader
from toontown.town.DDTownLoader import DDTownLoader
from toontown.toonbase import ToontownGlobals
from toontown.hood.ToonHood import ToonHood
class DDHood(ToonHood):
notify = directNotify.newCategory('DDHood')
ID = ToontownGlobals.DonaldsDock
TOWNLOADER_CLASS = DDTownLoader
SAFEZONELOADER_CLASS = DDSafeZoneLoader
STORAGE_DNA = 'phase_6/dna/storage_DD.dna'
SKY_FILE = 'phase_3.5/models/props/BR_sky'
SPOOKY_SKY_FILE = 'phase_3.5/models/props/BR_sky'
TITLE_COLOR = (0.8, 0.6, 0.5, 1.0)
HOLIDAY_DNA = {
ToontownGlobals.WINTER_DECORATIONS: ['phase_6/dna/winter_storage_DD.dna'],
ToontownGlobals.WACKY_WINTER_DECORATIONS: ['phase_6/dna/winter_storage_DD.dna'],
ToontownGlobals.HALLOWEEN_PROPS: ['phase_6/dna/halloween_props_storage_DD.dna'],
ToontownGlobals.SPOOKY_PROPS: ['phase_6/dna/halloween_props_storage_DD.dna']}
def __init__(self, parentFSM, doneEvent, dnaStore, hoodId):
ToonHood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId)
# Load content pack ambience settings:
ambience = contentPacksMgr.getAmbience('donalds-dock')
color = ambience.get('underwater-color')
if color is not None:
try:
self.underwaterColor = Vec4(color['r'], color['g'], color['b'], color['a'])
except Exception as e:
raise ContentPackError(e)
elif self.underwaterColor is None:
self.underwaterColor = Vec4(0, 0, 0.6, 1)
def load(self):
ToonHood.load(self)
self.fog = Fog('DDFog')
```
#### File: toontown/makeatoon/FrameGUI.py
```python
from direct.gui.DirectGui import *
from .MakeAToonGlobals import *
class ChoiceFrame:
def __init__(self, parent, image_scale, images, pos, hpr, scale, text, text_scale, text_pos, button_command):
self.choice_box = DirectFrame(parent=parent, image=images[0], pos=pos, image_scale=image_scale,
relief=None, hpr=hpr, scale=scale, text=text, text_scale=text_scale,
text_pos=text_pos, text_fg=(1, 1, 1, 1))
self.left_arrow = DirectButton(parent=self.choice_box, relief=None, image=images[1],
image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, extraArgs=[-1],
image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=button_command)
self.right_arrow = DirectButton(parent=self.choice_box, relief=None, image=images[1],
image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale,
extraArgs=[1], image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0),
command=button_command)
def destroy(self):
self.choice_box.destroy()
self.left_arrow.destroy()
self.right_arrow.destroy()
del self.choice_box
del self.left_arrow
del self.right_arrow
```
#### File: toontown/minigame/DistributedDivingGameAI.py
```python
from .DistributedMinigameAI import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.actor import Actor
from . import DivingGameGlobals
import random
class DistributedDivingGameAI(DistributedMinigameAI):
fishProportions = []
for i in range(6):
fishProportions.append([])
n = 100
    # fishProportions[0]:
fishProportions[0].append(([0, 0.8],
[0.8, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([0, 0.8],
[0.8, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([0, 0.7],
[0.7, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([0, 0.5],
[0.5, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([n, 0.5],
[0.5, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
    # fishProportions[1]:
fishProportions[1].append(([0, 0.8],
[0.8, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[1].append(([0, 0.8],
[0.8, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[1].append(([0, 0.7],
[0.7, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[1].append(([0, 0.7],
[0.7, 0.9],
[n, n],
[n, n],
[n, n],
[0.9, 1]))
fishProportions[1].append(([0, 0.4],
[0.4, 0.8],
[n, n],
[n, n],
[n, n],
[0.8, 1]))
fishProportions[1].append(([n, 0.3],
[0.3, 0.6],
[n, n],
[n, n],
[n, n],
[0.6, 1]))
    # fishProportions[2]:
fishProportions[2].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[2].append(([0, 0.6],
[0.6, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[2].append(([0, 0.6],
[0.6, 0.8],
[n, n],
[0.8, 1],
[n, n],
[n, n]))
fishProportions[2].append(([0, 0.5],
[0.5, 0.7],
[n, n],
[0.7, 0.9],
[n, n],
[0.9, 1]))
fishProportions[2].append(([0, 0.2],
[0.2, 0.4],
[n, n],
[0.4, 0.75],
[n, n],
[0.75, 1]))
fishProportions[2].append(([n, 0.2],
[0.2, 0.6],
[n, n],
[0.6, 0.8],
[n, n],
[0.8, 1]))
    # fishProportions[3]:
fishProportions[3].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[3].append(([0, 0.6],
[0.6, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[3].append(([0, 0.6],
[0.6, 0.8],
[n, n],
[0.95, 1],
[n, n],
[n, n]))
fishProportions[3].append(([0, 0.5],
[0.5, 0.7],
[n, n],
[0.7, 0.85],
[0.9, 0.95],
[0.95, 1]))
fishProportions[3].append(([0, 0.2],
[0.2, 0.4],
[n, n],
[0.4, 0.75],
[0.75, 0.85],
[0.85, 1]))
fishProportions[3].append(([n, 0.2],
[0.2, 0.6],
[n, n],
[0.6, 0.8],
[n, n],
[0.8, 1]))
    # fishProportions[4]:
fishProportions[4].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[4].append(([0, 0.45],
[0.45, 0.9],
[n, n],
[0.9, 1],
[n, n],
[n, n]))
fishProportions[4].append(([0, 0.2],
[0.2, 0.5],
[n, n],
[0.5, 0.95],
[0.95, 1],
[n, n]))
fishProportions[4].append(([0, 0.1],
[0.1, 0.3],
[n, n],
[0.3, 0.75],
[0.75, 0.8],
[0.8, 1]))
fishProportions[4].append(([n, n],
[0, 0.15],
[n, n],
[0.15, 0.4],
[n, n],
[0.4, 1]))
fishProportions[4].append(([n, n],
[n, n],
[n, n],
[0, 0.4],
[n, n],
[0.6, 1]))
    # fishProportions[5]:
fishProportions[5].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[5].append(([0, 0.45],
[0.45, 0.9],
[n, n],
[0.9, 1],
[n, n],
[n, n]))
fishProportions[5].append(([0, 0.2],
[0.2, 0.5],
[n, n],
[0.5, 0.95],
[0.95, 1],
[n, n]))
fishProportions[5].append(([0, 0.1],
[0.1, 0.3],
[n, n],
[0.3, 0.75],
[0.75, 0.8],
[0.8, 1]))
fishProportions[5].append(([n, n],
[0, 0.15],
[n, n],
[0.15, 0.4],
[n, n],
[0.4, 1]))
fishProportions[5].append(([n, n],
[n, n],
[n, n],
[0, 0.4],
[n, n],
[0.6, 1]))
difficultyPatternsAI = {ToontownGlobals.ToontownCentral: [3.5, fishProportions[0], 1.5],
ToontownGlobals.DonaldsDock: [3.0, fishProportions[1], 1.8],
ToontownGlobals.DaisyGardens: [2.5, fishProportions[2], 2.1],
ToontownGlobals.MinniesMelodyland: [2.0, fishProportions[3], 2.4],
ToontownGlobals.TheBrrrgh: [2.0, fishProportions[4], 2.7],
ToontownGlobals.DonaldsDreamland: [1.5, fishProportions[5], 3.0]}
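    # difficultyPatternsAI maps each safezone to [spawn interval, per-spawner
    # fish probability table, reward modifier]. Each row of a table is one
    # spawner's (low, high] probability bands per fish type; n == 100 marks a
    # band that can never be drawn.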
def __init__(self, air, minigameId):
try:
self.DistributedDivingGameAI_initialized
except:
self.DistributedDivingGameAI_initialized = 1
DistributedMinigameAI.__init__(self, air, minigameId)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedDivingGameAI', [State.State('inactive', self.enterInactive, self.exitInactive, ['swimming']), State.State('swimming', self.enterSwimming, self.exitSwimming, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, ['inactive'])], 'inactive', 'inactive')
self.addChildGameFSM(self.gameFSM)
self.__timeBase = globalClockDelta.localToNetworkTime(globalClock.getRealTime())
def delete(self):
self.notify.debug('delete')
del self.gameFSM
DistributedMinigameAI.delete(self)
def setGameReady(self):
self.notify.debug('setGameReady')
self.sendUpdate('setTrolleyZone', [self.trolleyZone])
for avId in list(self.scoreDict.keys()):
self.scoreDict[avId] = 0
self.treasureHolders = [0] * self.numPlayers
self.SPAWNTIME = self.difficultyPatternsAI[self.getSafezoneId()][0]
self.proportion = self.difficultyPatternsAI[self.getSafezoneId()][1]
self.REWARDMOD = self.difficultyPatternsAI[self.getSafezoneId()][2]
DistributedMinigameAI.setGameReady(self)
self.spawnings = []
for i in range(DivingGameGlobals.NUM_SPAWNERS):
self.spawnings.append(Sequence(Func(self.spawnFish, i), Wait(self.SPAWNTIME + random.random()), Func(self.spawnFish, i), Wait(self.SPAWNTIME - 0.5 + random.random())))
self.spawnings[i].loop()
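        # Each spawner runs its own looping Sequence that spawns two fish per
        # cycle with randomly jittered waits, so spawn timing drifts apart
        # between spawners over the course of the game.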
def setGameStart(self, timestamp):
self.notify.debug('setGameStart')
DistributedMinigameAI.setGameStart(self, timestamp)
self.gameFSM.request('swimming')
self.scoreTracking = {}
for avId in list(self.scoreDict.keys()):
            self.scoreTracking[avId] = [0, 0, 0, 0, 0]
def getCrabMoving(self, crabId, crabX, dir):
timestamp = globalClockDelta.getFrameNetworkTime()
rand1 = int(random.random() * 10)
rand2 = int(random.random() * 10)
        self.sendUpdate('setCrabMoving', [crabId, timestamp, rand1, rand2, crabX, dir])
def treasureRecovered(self):
if not hasattr(self, 'scoreTracking'):
return
avId = self.air.getAvatarIdFromSender()
if avId not in self.avIdList:
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.treasureRecovered: invalid avId')
return
if avId not in self.treasureHolders:
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.treasureRecovered: tried to recover without holding treasure')
return
self.treasureHolders[self.treasureHolders.index(avId)] = 0
timestamp = globalClockDelta.getFrameNetworkTime()
newSpot = int(random.random() * 30)
self.scoreTracking[avId][4] += 1
for someAvId in list(self.scoreDict.keys()):
if someAvId == avId:
self.scoreDict[avId] += 10 * (self.REWARDMOD * 0.25)
self.scoreDict[someAvId] += 10 * (self.REWARDMOD * 0.75 / float(len(list(self.scoreDict.keys()))))
self.sendUpdate('incrementScore', [avId, newSpot, timestamp])
def hasScoreMult(self):
return 0
def setGameAbort(self):
self.notify.debug('setGameAbort')
taskMgr.remove(self.taskName('gameTimer'))
if self.gameFSM.getCurrentState():
self.gameFSM.request('cleanup')
DistributedMinigameAI.setGameAbort(self)
def gameOver(self):
self.notify.debug('gameOver')
self.gameFSM.request('cleanup')
DistributedMinigameAI.gameOver(self)
trackingString = 'MiniGame Stats : Diving Game'
trackingString += '\nDistrict:%s' % self.getSafezoneId()
for avId in list(self.scoreTracking.keys()):
trackingString = trackingString + '\navId:%s fishHits:%s crabHits:%s treasureCatches:%s treasureDrops:%s treasureRecoveries:%s Score: %s' % (avId,
self.scoreTracking[avId][0],
self.scoreTracking[avId][1],
self.scoreTracking[avId][2],
self.scoreTracking[avId][3],
self.scoreTracking[avId][4],
self.scoreDict[avId])
self.air.writeServerEvent('MiniGame Stats', None, trackingString)
return
def enterInactive(self):
self.notify.debug('enterInactive')
def exitInactive(self):
pass
def getTimeBase(self):
return self.__timeBase
def enterSwimming(self):
self.notify.debug('enterSwimming')
duration = 65.0
taskMgr.doMethodLater(duration, self.timerExpired, self.taskName('gameTimer'))
def timerExpired(self, task):
self.notify.debug('timer expired')
for avId in list(self.scoreDict.keys()):
if self.scoreDict[avId] < 5:
self.scoreDict[avId] = 5
self.gameOver()
return Task.done
def exitSwimming(self):
for i in range(DivingGameGlobals.NUM_SPAWNERS):
self.spawnings[i].pause()
def enterCleanup(self):
self.notify.debug('enterCleanup')
for i in range(DivingGameGlobals.NUM_SPAWNERS):
self.spawnings[i].finish()
del self.spawnings
self.gameFSM.request('inactive')
def exitCleanup(self):
pass
def pickupTreasure(self, chestId):
if not hasattr(self, 'scoreTracking'):
return
timestamp = globalClockDelta.getFrameNetworkTime()
avId = self.air.getAvatarIdFromSender()
if avId not in self.avIdList:
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.pickupTreasure: invalid avId')
return
if avId in self.treasureHolders:
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.pickupTreasure: already holding treasure')
return
if not (0 <= chestId < len(self.treasureHolders)):
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.pickupTreasure: invalid chest requested (#%d)' % chestId)
return
if self.treasureHolders[chestId]:
# This chest is already held by someone else. Because this can happen
# during normal play (race conditions if two Toons swim into the treasure
# simultaneously) we do not log a suspicious event and silently ignore it.
return
self.scoreTracking[avId][2] += 1
self.treasureHolders[chestId] = avId
self.sendUpdate('setTreasureGrabbed', [avId, chestId])
def spawnFish(self, spawnerId):
timestamp = globalClockDelta.getFrameNetworkTime()
props = self.proportion[spawnerId]
num = random.random()
for i in range(len(props)):
prop = props[i]
low = prop[0]
high = prop[1]
if num > low and num <= high:
offset = int(10 * random.random())
                self.sendUpdate('fishSpawn', [timestamp, i, spawnerId, offset])
return
def handleCrabCollision(self, avId, status):
if avId not in self.avIdList:
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.handleCrabCollision: invalid avId')
return
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setTreasureDropped', [avId, timestamp])
self.scoreTracking[avId][1] += 1
if status == 'normal' or status == 'treasure':
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('performCrabCollision', [avId, timestamp])
if status == 'treasure':
if avId in self.treasureHolders:
self.treasureHolders[self.treasureHolders.index(avId)] = 0
self.scoreTracking[avId][3] += 1
else:
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.handleCrabCollision: reported "treasure drop" without holding treasure')
def handleFishCollision(self, avId, spawnId, spawnerId, status):
if avId not in self.avIdList:
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.handleFishCollision: invalid avId')
return
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setTreasureDropped', [avId, timestamp])
timestamp = globalClockDelta.getFrameNetworkTime()
self.scoreTracking[avId][0] += 1
if status == 'treasure':
if avId in self.treasureHolders:
self.treasureHolders[self.treasureHolders.index(avId)] = 0
self.scoreTracking[avId][3] += 1
else:
self.air.writeServerEvent('suspicious', avId, 'DivingGameAI.handleFishCollision: reported "treasure drop" without holding treasure')
        self.sendUpdate('performFishCollision', [avId, spawnId, spawnerId, timestamp])
```
#### File: toontown/minigame/DistributedMinigameAI.py
```python
from otp.ai.AIBase import *
from direct.distributed.ClockDelta import *
from toontown.ai.ToonBarrier import *
from direct.distributed import DistributedObjectAI
from direct.fsm import ClassicFSM, State
from toontown.shtiker import PurchaseManagerAI
from toontown.shtiker import NewbiePurchaseManagerAI
from . import MinigameCreatorAI
from direct.task import Task
import random
from . import MinigameGlobals
from direct.showbase import PythonUtil
from . import TravelGameGlobals
from toontown.toonbase import ToontownGlobals
EXITED = 0
EXPECTED = 1
JOINED = 2
READY = 3
DEFAULT_POINTS = 1
MAX_POINTS = 7
JOIN_TIMEOUT = 40.0 + MinigameGlobals.latencyTolerance
READY_TIMEOUT = MinigameGlobals.MaxLoadTime + MinigameGlobals.rulesDuration + MinigameGlobals.latencyTolerance
EXIT_TIMEOUT = 20.0 + MinigameGlobals.latencyTolerance
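# Per-avatar lifecycle: EXPECTED -> JOINED -> READY -> EXITED, with a
# ToonBarrier and timeout guarding each transition (see the framework FSM
# states below).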
class DistributedMinigameAI(DistributedObjectAI.DistributedObjectAI):
notify = directNotify.newCategory('DistributedMinigameAI')
def __init__(self, air, minigameId):
try:
self.DistributedMinigameAI_initialized
except:
self.DistributedMinigameAI_initialized = 1
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
self.minigameId = minigameId
self.frameworkFSM = ClassicFSM.ClassicFSM('DistributedMinigameAI', [State.State('frameworkOff', self.enterFrameworkOff, self.exitFrameworkOff, ['frameworkWaitClientsJoin']),
State.State('frameworkWaitClientsJoin', self.enterFrameworkWaitClientsJoin, self.exitFrameworkWaitClientsJoin, ['frameworkWaitClientsReady', 'frameworkWaitClientsExit', 'frameworkCleanup']),
State.State('frameworkWaitClientsReady', self.enterFrameworkWaitClientsReady, self.exitFrameworkWaitClientsReady, ['frameworkGame', 'frameworkWaitClientsExit', 'frameworkCleanup']),
State.State('frameworkGame', self.enterFrameworkGame, self.exitFrameworkGame, ['frameworkWaitClientsExit', 'frameworkCleanup']),
State.State('frameworkWaitClientsExit', self.enterFrameworkWaitClientsExit, self.exitFrameworkWaitClientsExit, ['frameworkCleanup']),
State.State('frameworkCleanup', self.enterFrameworkCleanup, self.exitFrameworkCleanup, ['frameworkOff'])], 'frameworkOff', 'frameworkOff')
self.frameworkFSM.enterInitialState()
self.avIdList = []
self.stateDict = {}
self.scoreDict = {}
self.difficultyOverride = None
self.trolleyZoneOverride = None
self.metagameRound = -1
self.startingVotes = {}
return
def addChildGameFSM(self, gameFSM):
self.frameworkFSM.getStateNamed('frameworkGame').addChild(gameFSM)
def removeChildGameFSM(self, gameFSM):
self.frameworkFSM.getStateNamed('frameworkGame').removeChild(gameFSM)
def setExpectedAvatars(self, avIds):
self.avIdList = avIds
self.numPlayers = len(self.avIdList)
self.notify.debug('BASE: setExpectedAvatars: expecting avatars: ' + str(self.avIdList))
def setNewbieIds(self, newbieIds):
self.newbieIdList = newbieIds
if len(self.newbieIdList) > 0:
self.notify.debug('BASE: setNewbieIds: %s' % self.newbieIdList)
def setTrolleyZone(self, trolleyZone):
self.trolleyZone = trolleyZone
def setDifficultyOverrides(self, difficultyOverride, trolleyZoneOverride):
self.difficultyOverride = difficultyOverride
if self.difficultyOverride is not None:
self.difficultyOverride = MinigameGlobals.QuantizeDifficultyOverride(difficultyOverride)
self.trolleyZoneOverride = trolleyZoneOverride
return
def setMetagameRound(self, roundNum):
self.metagameRound = roundNum
def _playing(self):
if not hasattr(self, 'gameFSM'):
return False
if self.gameFSM.getCurrentState() == None:
return False
return self.gameFSM.getCurrentState().getName() == 'play'
def _inState(self, states):
if not hasattr(self, 'gameFSM'):
return False
if self.gameFSM.getCurrentState() == None:
return False
        return self.gameFSM.getCurrentState().getName() in PythonUtil.makeList(states)
def generate(self):
DistributedObjectAI.DistributedObjectAI.generate(self)
self.frameworkFSM.request('frameworkWaitClientsJoin')
def delete(self):
self.notify.debug('BASE: delete: deleting AI minigame object')
del self.frameworkFSM
self.ignoreAll()
DistributedObjectAI.DistributedObjectAI.delete(self)
def isSinglePlayer(self):
if self.numPlayers == 1:
return 1
else:
return 0
def getParticipants(self):
return self.avIdList
def getTrolleyZone(self):
return self.trolleyZone
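    # Overrides travel over the wire as integers: the float difficulty is
    # scaled up by DifficultyOverrideMult before sending, and sentinel values
    # (NoDifficultyOverride / NoTrolleyZoneOverride) mean "no override".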
def getDifficultyOverrides(self):
response = [self.difficultyOverride, self.trolleyZoneOverride]
if response[0] is None:
response[0] = MinigameGlobals.NoDifficultyOverride
else:
response[0] *= MinigameGlobals.DifficultyOverrideMult
response[0] = int(response[0])
if response[1] is None:
response[1] = MinigameGlobals.NoTrolleyZoneOverride
return response
def b_setGameReady(self):
self.setGameReady()
self.d_setGameReady()
def d_setGameReady(self):
self.notify.debug('BASE: Sending setGameReady')
self.sendUpdate('setGameReady', [])
def setGameReady(self):
self.notify.debug('BASE: setGameReady: game ready with avatars: %s' % self.avIdList)
self.normalExit = 1
def b_setGameStart(self, timestamp):
self.d_setGameStart(timestamp)
self.setGameStart(timestamp)
def d_setGameStart(self, timestamp):
self.notify.debug('BASE: Sending setGameStart')
self.sendUpdate('setGameStart', [timestamp])
def setGameStart(self, timestamp):
self.notify.debug('BASE: setGameStart')
def b_setGameExit(self):
self.d_setGameExit()
self.setGameExit()
def d_setGameExit(self):
self.notify.debug('BASE: Sending setGameExit')
self.sendUpdate('setGameExit', [])
def setGameExit(self):
self.notify.debug('BASE: setGameExit')
def setGameAbort(self):
self.notify.debug('BASE: setGameAbort')
self.normalExit = 0
self.sendUpdate('setGameAbort', [])
self.frameworkFSM.request('frameworkCleanup')
def handleExitedAvatar(self, avId):
self.notify.warning('BASE: handleExitedAvatar: avatar id exited: ' + str(avId))
self.stateDict[avId] = EXITED
self.setGameAbort()
def gameOver(self):
self.notify.debug('BASE: gameOver')
if simbase.air.wantAchievements:
for avId in self.avIdList:
av = self.air.doId2do.get(avId)
self.air.achievementsManager.toonPlayedMinigame(av)
self.frameworkFSM.request('frameworkWaitClientsExit')
def enterFrameworkOff(self):
self.notify.debug('BASE: enterFrameworkOff')
def exitFrameworkOff(self):
pass
def enterFrameworkWaitClientsJoin(self):
self.notify.debug('BASE: enterFrameworkWaitClientsJoin')
for avId in self.avIdList:
self.stateDict[avId] = EXPECTED
self.scoreDict[avId] = DEFAULT_POINTS
self.acceptOnce(self.air.getAvatarExitEvent(avId), self.handleExitedAvatar, extraArgs=[avId])
def allAvatarsJoined(self = self):
self.notify.debug('BASE: all avatars joined')
self.b_setGameReady()
self.frameworkFSM.request('frameworkWaitClientsReady')
def handleTimeout(avIds, self = self):
self.notify.debug('BASE: timed out waiting for clients %s to join' % avIds)
self.setGameAbort()
self.__barrier = ToonBarrier('waitClientsJoin', self.uniqueName('waitClientsJoin'), self.avIdList, JOIN_TIMEOUT, allAvatarsJoined, handleTimeout)
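        # ToonBarrier fires allAvatarsJoined once every expected avatar has
        # checked in via clear(); if JOIN_TIMEOUT elapses first, handleTimeout
        # aborts the game instead.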
def setAvatarJoined(self):
if self.frameworkFSM.getCurrentState().getName() != 'frameworkWaitClientsJoin':
self.notify.debug('BASE: Ignoring setAvatarJoined message')
return
avId = self.air.getAvatarIdFromSender()
self.notify.debug('BASE: setAvatarJoined: avatar id joined: ' + str(avId))
self.air.writeServerEvent('minigame_joined', avId, '%s|%s' % (self.minigameId, self.trolleyZone))
self.stateDict[avId] = JOINED
self.notify.debug('BASE: setAvatarJoined: new states: ' + str(self.stateDict))
self.__barrier.clear(avId)
def exitFrameworkWaitClientsJoin(self):
self.__barrier.cleanup()
del self.__barrier
def enterFrameworkWaitClientsReady(self):
self.notify.debug('BASE: enterFrameworkWaitClientsReady')
def allAvatarsReady(self = self):
self.notify.debug('BASE: all avatars ready')
self.frameworkFSM.request('frameworkGame')
def handleTimeout(avIds, self = self):
self.notify.debug("BASE: timed out waiting for clients %s to report 'ready'" % avIds)
self.setGameAbort()
self.__barrier = ToonBarrier('waitClientsReady', self.uniqueName('waitClientsReady'), self.avIdList, READY_TIMEOUT, allAvatarsReady, handleTimeout)
for avId in list(self.stateDict.keys()):
if self.stateDict[avId] == READY:
self.__barrier.clear(avId)
self.notify.debug(' safezone: %s' % self.getSafezoneId())
self.notify.debug('difficulty: %s' % self.getDifficulty())
def setAvatarReady(self):
if self.frameworkFSM.getCurrentState().getName() not in ['frameworkWaitClientsReady', 'frameworkWaitClientsJoin']:
self.notify.debug('BASE: Ignoring setAvatarReady message')
return
avId = self.air.getAvatarIdFromSender()
self.notify.debug('BASE: setAvatarReady: avatar id ready: ' + str(avId))
self.stateDict[avId] = READY
self.notify.debug('BASE: setAvatarReady: new avId states: ' + str(self.stateDict))
if self.frameworkFSM.getCurrentState().getName() == 'frameworkWaitClientsReady':
self.__barrier.clear(avId)
def exitFrameworkWaitClientsReady(self):
self.__barrier.cleanup()
del self.__barrier
def enterFrameworkGame(self):
self.notify.debug('BASE: enterFrameworkGame')
self.gameStartTime = globalClock.getRealTime()
self.b_setGameStart(globalClockDelta.localToNetworkTime(self.gameStartTime))
def exitFrameworkGame(self):
pass
def enterFrameworkWaitClientsExit(self):
self.notify.debug('BASE: enterFrameworkWaitClientsExit')
self.b_setGameExit()
def allAvatarsExited(self = self):
self.notify.debug('BASE: all avatars exited')
self.frameworkFSM.request('frameworkCleanup')
def handleTimeout(avIds, self = self):
self.notify.debug('BASE: timed out waiting for clients %s to exit' % avIds)
self.frameworkFSM.request('frameworkCleanup')
self.__barrier = ToonBarrier('waitClientsExit', self.uniqueName('waitClientsExit'), self.avIdList, EXIT_TIMEOUT, allAvatarsExited, handleTimeout)
for avId in list(self.stateDict.keys()):
if self.stateDict[avId] == EXITED:
self.__barrier.clear(avId)
def setAvatarExited(self):
if self.frameworkFSM.getCurrentState().getName() != 'frameworkWaitClientsExit':
self.notify.debug('BASE: Ignoring setAvatarExit message')
return
avId = self.air.getAvatarIdFromSender()
self.notify.debug('BASE: setAvatarExited: avatar id exited: ' + str(avId))
self.stateDict[avId] = EXITED
self.notify.debug('BASE: setAvatarExited: new avId states: ' + str(self.stateDict))
self.__barrier.clear(avId)
def exitFrameworkWaitClientsExit(self):
self.__barrier.cleanup()
del self.__barrier
def hasScoreMult(self):
return 1
def enterFrameworkCleanup(self):
self.notify.debug('BASE: enterFrameworkCleanup: normalExit=%s' % self.normalExit)
scoreMult = MinigameGlobals.getScoreMult(self.getSafezoneId())
if not self.hasScoreMult():
scoreMult = 1.0
self.notify.debug('score multiplier: %s' % scoreMult)
for avId in self.avIdList:
self.scoreDict[avId] *= scoreMult
scoreList = []
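        # Build the per-avatar jellybean payout: normal exits pay the earned
        # (multiplied) score, aborted games pay a random consolation amount;
        # holiday multipliers apply afterwards, and anything outside the
        # 0-255 wire range is clamped and logged as suspicious.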
if not self.normalExit:
randReward = random.randrange(DEFAULT_POINTS, MAX_POINTS + 1)
for avId in self.avIdList:
if self.normalExit:
score = int(self.scoreDict[avId] + 0.5)
else:
score = randReward
if ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY in simbase.air.holidayManager.currentHolidays or ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY_MONTH in simbase.air.holidayManager.currentHolidays:
score *= MinigameGlobals.JellybeanTrolleyHolidayScoreMultiplier
logEvent = False
if score > 255:
score = 255
logEvent = True
elif score < 0:
score = 0
logEvent = True
if logEvent:
self.air.writeServerEvent('suspicious', avId, 'got %s jellybeans playing minigame %s in zone %s' % (score, self.minigameId, self.getSafezoneId()))
scoreList.append(score)
self.requestDelete()
if self.metagameRound > -1:
self.handleMetagamePurchaseManager(scoreList)
else:
self.handleRegularPurchaseManager(scoreList)
self.frameworkFSM.request('frameworkOff')
def handleMetagamePurchaseManager(self, scoreList):
self.notify.debug('self.newbieIdList = %s' % self.newbieIdList)
votesToUse = self.startingVotes
if hasattr(self, 'currentVotes'):
votesToUse = self.currentVotes
votesArray = []
for avId in self.avIdList:
if avId in votesToUse:
votesArray.append(votesToUse[avId])
else:
self.notify.warning('votesToUse=%s does not have avId=%d' % (votesToUse, avId))
votesArray.append(0)
if self.metagameRound < TravelGameGlobals.FinalMetagameRoundIndex:
newRound = self.metagameRound
if not self.minigameId == ToontownGlobals.TravelGameId:
for index in range(len(scoreList)):
votesArray[index] += scoreList[index]
self.notify.debug('votesArray = %s' % votesArray)
desiredNextGame = None
if hasattr(self, 'desiredNextGame'):
desiredNextGame = self.desiredNextGame
numToons = 0
lastAvId = 0
for avId in self.avIdList:
av = simbase.air.doId2do.get(avId)
if av:
numToons += 1
lastAvId = avId
doNewbie = False
if numToons == 1 and lastAvId in self.newbieIdList:
doNewbie = True
if doNewbie:
pm = NewbiePurchaseManagerAI.NewbiePurchaseManagerAI(self.air, lastAvId, self.avIdList, scoreList, self.minigameId, self.trolleyZone)
MinigameCreatorAI.acquireMinigameZone(self.zoneId)
pm.generateWithRequired(self.zoneId)
else:
pm = PurchaseManagerAI.PurchaseManagerAI(self.air, self.avIdList, scoreList, self.minigameId, self.trolleyZone, self.newbieIdList, votesArray, newRound, desiredNextGame)
pm.generateWithRequired(self.zoneId)
else:
self.notify.debug('last minigame, handling newbies')
if ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY in simbase.air.holidayManager.currentHolidays or ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY_MONTH in simbase.air.holidayManager.currentHolidays:
votesArray = [MinigameGlobals.JellybeanTrolleyHolidayScoreMultiplier * x for x in votesArray]
for id in self.newbieIdList:
pm = NewbiePurchaseManagerAI.NewbiePurchaseManagerAI(self.air, id, self.avIdList, scoreList, self.minigameId, self.trolleyZone)
MinigameCreatorAI.acquireMinigameZone(self.zoneId)
pm.generateWithRequired(self.zoneId)
if len(self.avIdList) > len(self.newbieIdList):
pm = PurchaseManagerAI.PurchaseManagerAI(self.air, self.avIdList, scoreList, self.minigameId, self.trolleyZone, self.newbieIdList, votesArray=votesArray, metagameRound=self.metagameRound)
pm.generateWithRequired(self.zoneId)
return
def handleRegularPurchaseManager(self, scoreList):
for id in self.newbieIdList:
pm = NewbiePurchaseManagerAI.NewbiePurchaseManagerAI(self.air, id, self.avIdList, scoreList, self.minigameId, self.trolleyZone)
MinigameCreatorAI.acquireMinigameZone(self.zoneId)
pm.generateWithRequired(self.zoneId)
if len(self.avIdList) > len(self.newbieIdList):
pm = PurchaseManagerAI.PurchaseManagerAI(self.air, self.avIdList, scoreList, self.minigameId, self.trolleyZone, self.newbieIdList)
pm.generateWithRequired(self.zoneId)
def exitFrameworkCleanup(self):
pass
def requestExit(self):
self.notify.debug('BASE: requestExit: client has requested the game to end')
self.setGameAbort()
def local2GameTime(self, timestamp):
return timestamp - self.gameStartTime
def game2LocalTime(self, timestamp):
return timestamp + self.gameStartTime
def getCurrentGameTime(self):
return self.local2GameTime(globalClock.getFrameTime())
def getDifficulty(self):
if self.difficultyOverride is not None:
return self.difficultyOverride
if hasattr(self.air, 'minigameDifficulty'):
return float(self.air.minigameDifficulty)
return MinigameGlobals.getDifficulty(self.getSafezoneId())
def getSafezoneId(self):
if self.trolleyZoneOverride is not None:
return self.trolleyZoneOverride
if hasattr(self.air, 'minigameSafezoneId'):
return MinigameGlobals.getSafezoneId(self.air.minigameSafezoneId)
return MinigameGlobals.getSafezoneId(self.trolleyZone)
def logPerfectGame(self, avId):
self.air.writeServerEvent('perfectMinigame', avId, '%s|%s|%s' % (self.minigameId, self.trolleyZone, self.avIdList))
def logAllPerfect(self):
for avId in self.avIdList:
self.logPerfectGame(avId)
def getStartingVotes(self):
retval = []
for avId in self.avIdList:
if avId in self.startingVotes:
retval.append(self.startingVotes[avId])
else:
self.notify.warning('how did this happen? avId=%d not in startingVotes %s' % (avId, self.startingVotes))
retval.append(0)
return retval
def setStartingVote(self, avId, startingVote):
self.startingVotes[avId] = startingVote
self.notify.debug('setting starting vote of avId=%d to %d' % (avId, startingVote))
def getMetagameRound(self):
return self.metagameRound
```
#### File: toontown/minigame/DistributedTargetGame.py
```python
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from .DistributedMinigame import *
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM, State
from direct.task import Task
from . import ArrowKeys
from . import TargetGameGlobals
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import math
from math import *
import random
from . import RubberBand
from . import FogOverlay
def circleX(angle, radius, centerX, centerY):
x = radius * cos(angle) + centerX
return x
def circleY(angle, radius, centerX, centerY):
y = radius * sin(angle) + centerY
return y
def getCirclePoints(segCount, centerX, centerY, radius, wideX = 1.0, wideY = 1.0):
returnShape = []
for seg in range(0, int(segCount)):
coordX = wideX * circleX(pi * 2.0 * float(float(seg) / float(segCount)), radius, centerX, centerY)
coordY = wideY * circleY(pi * 2.0 * float(float(seg) / float(segCount)), radius, centerX, centerY)
returnShape.append((coordX, coordY, 1))
coordX = wideX * circleX(pi * 2.0 * float(0 / segCount), radius, centerX, centerY)
coordY = wideY * circleY(pi * 2.0 * float(0 / segCount), radius, centerX, centerY)
returnShape.append((coordX, coordY, 1))
return returnShape
def getRingPoints(segCount, centerX, centerY, radius, thickness = 2.0, wideX = 1.0, wideY = 1.0):
returnShape = []
for seg in range(0, segCount):
coordX = wideX * circleX(pi * 2.0 * float(float(seg) / float(segCount)), radius - thickness, centerX, centerY)
coordY = wideY * circleY(pi * 2.0 * float(float(seg) / float(segCount)), radius - thickness, centerX, centerY)
returnShape.append((coordX, coordY, 1))
coordX = wideX * circleX(pi * 2.0 * float(float(seg) / float(segCount)), radius, centerX, centerY)
coordY = wideY * circleY(pi * 2.0 * float(float(seg) / float(segCount)), radius, centerX, centerY)
returnShape.append((coordX, coordY, 1))
return returnShape
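# Builds ring geometry with Panda3D's low-level Geom API: vertex positions and
# colors are written into a GeomVertexData, then joined by a GeomTristrips
# primitive. The color argument is unused; callers tint via setColorScale.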
def addRing(attachNode, color, vertexCount, radius, layer = 0, thickness = 1.0):
targetGN = GeomNode('target Circle')
zFloat = 0.025
targetCircleShape = getRingPoints(5 + vertexCount, 0.0, 0.0, radius, thickness)
gFormat = GeomVertexFormat.getV3cp()
targetCircleVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic)
targetCircleVertexWriter = GeomVertexWriter(targetCircleVertexData, 'vertex')
targetCircleColorWriter = GeomVertexWriter(targetCircleVertexData, 'color')
for vertex in targetCircleShape:
targetCircleVertexWriter.addData3f(0.0 + vertex[0], 0.0 + vertex[1], zFloat)
targetCircleColorWriter.addData4f(1.0, 1.0, 1.0, 1.0)
targetTris = GeomTristrips(Geom.UHStatic)
sizeTarget = len(targetCircleShape)
for countVertex in range(0, sizeTarget):
targetTris.addVertex(countVertex)
targetTris.addVertex(0)
targetTris.addVertex(1)
targetTris.closePrimitive()
targetGeom = Geom(targetCircleVertexData)
targetGeom.addPrimitive(targetTris)
attachNode.addGeom(targetGeom)
return targetGeom
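# Builds a filled disc as a GeomTrifans primitive: one center vertex followed
# by the perimeter points. As in addRing, the color argument is unused.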
def addCircle(attachNode, color, vertexCount, radius, layer = 0):
targetGN = GeomNode('target Circle')
zFloat = 0.025
targetCircleShape = getCirclePoints(5 + vertexCount, 0.0, 0.0, radius)
gFormat = GeomVertexFormat.getV3cp()
targetCircleVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic)
targetCircleVertexWriter = GeomVertexWriter(targetCircleVertexData, 'vertex')
targetCircleColorWriter = GeomVertexWriter(targetCircleVertexData, 'color')
targetCircleVertexWriter.addData3f(0.0, 0.0, zFloat)
targetCircleColorWriter.addData4f(1.0, 1.0, 1.0, 1.0)
for vertex in targetCircleShape:
targetCircleVertexWriter.addData3f(0.0 + vertex[0], 0.0 + vertex[1], zFloat)
targetCircleColorWriter.addData4f(1.0, 1.0, 1.0, 1.0)
targetTris = GeomTrifans(Geom.UHStatic)
sizeTarget = len(targetCircleShape)
targetTris.addVertex(0)
for countVertex in range(1, sizeTarget + 1):
targetTris.addVertex(countVertex)
targetTris.addVertex(1)
targetTris.closePrimitive()
targetGeom = Geom(targetCircleVertexData)
targetGeom.addPrimitive(targetTris)
attachNode.addGeom(targetGeom)
return targetGeom
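# Placement rejection test: returns 1 when a circle of radius fillSize at
# (placeX, placeY) overlaps none of the circles already in placeList.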
def checkPlace(placeX, placeY, fillSize, placeList):
goodPlacement = 1
for place in placeList:
distance = math.sqrt((place[0] - placeX) * (place[0] - placeX) + (place[1] - placeY) * (place[1] - placeY))
distance = distance - (fillSize + place[2])
if distance <= 0.0:
goodPlacement = 0
break
return goodPlacement
class DistributedTargetGame(DistributedMinigame):
UPDATE_ENVIRON_TASK = 'TargetGameUpdateEnvironTask'
UPDATE_LOCALTOON_TASK = 'TargetGameUpdateLocalToonTask'
UPDATE_SHADOWS_TASK = 'TargetGameUpdateShadowsTask'
COLLISION_DETECTION_TASK = 'TargetGameCollisionDetectionTask'
    END_GAME_WAIT_TASK = 'TargetGameEndGameWaitTask'
UPDATE_POWERBAR_TASK = 'TargetGameUpdatePowerBarTask'
GAME_DONE_TASK = 'gameDoneTask'
UPDATE_COUNTDOWN_TASK = 'countdown task'
FLY2FALL_CAM_TASK = 'fly2FallCameraTask'
PRELAUNCH_CAM_TASK = 'prelaunchCameraTask'
SCORE_CAM_TASK = 'scoreCameraTask'
TOONSTRETCHTASK = 'tooncamtask'
UPDATE_LOCAL_TOON_PRIORITY = 0
UPDATE_POWER_BAR_PRIORITY = 1
UPDATE_RUBBER_BAND_PRIORITY = 2
COLLISION_DETECTION_PRIORITY = 5
UPDATE_SHADOWS_PRIORITY = 47
UMBRELLA_TEXTURE_LIST = ['phase_4/maps/mg_slingshot_umbrella_blue.jpg',
'phase_4/maps/mg_slingshot_umbrella_purple.jpg',
'phase_4/maps/mg_slingshot_umbrella_red.jpg',
'phase_4/maps/mg_slingshot_umbrella_yellow.jpg']
RT_UNKNOWN = 0
RT_SUCCESS = 1
RT_GROUPSUCCESS = 2
RT_FAILURE = 3
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedTargetGame', [State.State('off', self.enterOff, self.exitOff, ['prelaunch']),
State.State('prelaunch', self.enterPrelaunch, self.exitPrelaunch, ['launch']),
State.State('launch', self.enterLaunch, self.exitLaunch, ['fly']),
State.State('fly', self.enterFly, self.exitFly, ['fall', 'bouncing', 'score']),
State.State('fall', self.enterFall, self.exitFall, ['bouncing', 'score']),
State.State('bouncing', self.enterBouncing, self.exitBouncing, ['score']),
State.State('score', self.enterScore, self.exitScore, ['cleanup', 'prelaunch']),
State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
self.addChildGameFSM(self.gameFSM)
self.targets = None
self.round = 1
self.signalLaunch = 0
self.stretchX = 0
self.stretchY = 0
self.targetsPlaced = []
self.pattern = None
self.maxDist = 50
self.targetMarker = None
        self.umbrellaColorSelect = [1, 1, 1, 1]
self.localTrack = None
self.hitInfo = None
return
def getTitle(self):
return TTLocalizer.TargetGameTitle
def getInstructions(self):
p = self.avIdList.index(self.localAvId)
if self.isSinglePlayer():
text = TTLocalizer.TargetGameInstructionsSinglePlayer
else:
text = TTLocalizer.TargetGameInstructionsMultiPlayer
return text
def getMaxDuration(self):
return 60
def defineConstants(self):
self.CAMERA_Y = -25
self.TOON_Y = 0
self.FAR_PLANE_DIST = 370
tScreenCenterToEdge = 1.0
self.TOONXZ_SPEED = TargetGameGlobals.MAX_TOONXZ / tScreenCenterToEdge
self.WATER_DEPTH = 75.0
self.ENVIRON_LENGTH = TargetGameGlobals.ENVIRON_LENGTH
self.ENVIRON_WIDTH = TargetGameGlobals.ENVIRON_WIDTH
self.ENVIRON_START_OFFSET = 20.0
self.TOON_INITIAL_SPACING = 14.0
waterZOffset = 28.0
self.SEA_FLOOR_Z = -self.WATER_DEPTH / 2.0 + waterZOffset
self.FOGDISTGROUND = 600
self.FOGDISTCLOUD = 200
farPlaneDist = self.CAMERA_Y + self.FAR_PLANE_DIST - self.TOON_Y
self.ringGroupArrivalPeriod = 3.0
self.AMB_COLOR = Vec4(0.75, 0.8, 1.0, 1.0)
self.CLOUD_COLOR = Vec4(1.0, 1.0, 1.0, 1.0)
self.SHADOW_Z_OFFSET = 0.1
self.Y_VIS_MAX = self.FAR_PLANE_DIST
self.Y_VIS_MIN = self.CAMERA_Y
targetRadius = TargetGameGlobals.TARGET_RADIUS * 1.025
self.GAME_END_DELAY = 1.0
self.TOON_LOD = 1000
self.speedLaunch = 60
self.speedStall = 30
self.speedForward = 0
self.airResistance = 0.1
self.umbrellaResistance = 0.11
self.distance = 0
self.zVel = 0
self.lastPressed = None
self.lastPressTime = None
self.driftX = 0
self.driftY = 0
self.launchLaterial = 0
self.laterial = 0
self.gravity = 12.5
self.inList = []
self.placeShift = 10
self.startPower = 10
self.canPressRight = 1
self.canPressLeft = 1
self.jumpCount = 0
self.trampZ = 3.6
self.trampBounceCount = 0
self.cameraState = 'Low'
self.updateTick = 0
self.hitInfo = None
return
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
self.defineConstants()
self.help = DirectLabel(parent=aspect2d, relief=None, pos=(-0.0, 0, -0.8), text=TTLocalizer.TargetGameCountdown, text_scale=0.08, text_fg=(1, 1, 1, 1))
self.help.hide()
self.fogOver = FogOverlay.FogOverlay(Point3(self.CLOUD_COLOR[0], self.CLOUD_COLOR[1], self.CLOUD_COLOR[2]))
self.scoreboard = DirectFrame(parent=aspect2d, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1.75, 1, 0.75), pos=(0, 0, 0.587))
self.roundLabel = DirectLabel(parent=self.scoreboard, relief=None, pos=(0, 0, 0.28), text=TTLocalizer.TargetGameBoard % self.round, text_scale=0.08)
av1Label = DirectLabel(parent=self.scoreboard, relief=None, pos=(-0.8, 0, 0.14), text_align=TextNode.ALeft, text=' ', text_scale=0.08)
av2Label = DirectLabel(parent=self.scoreboard, relief=None, pos=(-0.8, 0, 0.02), text_align=TextNode.ALeft, text=' ', text_scale=0.08)
av3Label = DirectLabel(parent=self.scoreboard, relief=None, pos=(-0.8, 0, -0.1), text_align=TextNode.ALeft, text=' ', text_scale=0.08)
av4Label = DirectLabel(parent=self.scoreboard, relief=None, pos=(-0.8, 0, -0.22), text_align=TextNode.ALeft, text=' ', text_scale=0.08)
score1Label = DirectLabel(parent=self.scoreboard, relief=None, pos=(0.8, 0, 0.14), text_align=TextNode.ARight, text=' ', text_scale=0.08)
score2Label = DirectLabel(parent=self.scoreboard, relief=None, pos=(0.8, 0, 0.02), text_align=TextNode.ARight, text=' ', text_scale=0.08)
score3Label = DirectLabel(parent=self.scoreboard, relief=None, pos=(0.8, 0, -0.1), text_align=TextNode.ARight, text=' ', text_scale=0.08)
score4Label = DirectLabel(parent=self.scoreboard, relief=None, pos=(0.8, 0, -0.22), text_align=TextNode.ARight, text=' ', text_scale=0.08)
        self.avLabels = [av1Label, av2Label, av3Label, av4Label]
        self.scoreLabels = [score1Label, score2Label, score3Label, score4Label]
self.scoreboard.hide()
self.music = base.loader.loadMusic('phase_4/audio/bgm/MG_Diving.ogg')
self.sndAmbience = None
self.skyListLow = []
self.skyListMid = []
loadBase = 'phase_4/models/minigames/'
self.modelName = 'slingshot_game_desert4.bam'
self.trampoline = loader.loadModel('phase_4/models/minigames/slingshot_game_tramp.bam')
self.trampoline.setBin('ground', 100)
self.trampoline.find('**/tramp_shadow').hide()
self.cacti = loader.loadModel('phase_4/models/minigames/slingshot_game_cacti.bam')
self.cactusA = self.cacti.find('**/cactus_a')
self.cactusB = self.cacti.find('**/cactus_b')
self.findGround = '**/sand_layer1'
self.findSky = '**/blue_sky_layer1'
self.findCloudExtra = '**/cloud_layer1'
self.findCloud = '**/cloud_layer_alpha'
self.findLeftMount = '**/mountain_set_b'
self.findRightMount = '**/mountain_c'
self.findBack = '**/backdrop_layer1'
self.environModel = loader.loadModel(loadBase + self.modelName)
self.skyGN = GeomNode('sky Geometry')
self.skyNode1 = self.environModel.attachNewNode(self.skyGN)
leftMountModel = self.environModel.find(self.findLeftMount)
rightMountModel = self.environModel.find(self.findRightMount)
wrongCloud = self.environModel.find(self.findCloudExtra)
wrongCloud.hide()
backModel = self.environModel.find(self.findBack)
backModel.hide()
leftMountModel.hide()
rightMountModel.hide()
groundModel = self.environModel.find(self.findGround)
groundModel.setBin('ground', 0)
groundModel.setZ(-0.05)
skyModel = self.environModel.find(self.findSky)
skyModel2 = self.environModel.find(self.findCloud)
skyModel3 = self.environModel.attachNewNode('skyLow')
skyModel2.copyTo(skyModel3)
skyModel.setZ(76.0)
skyModel.setColorScale(1.0, 1.0, 1.0, 1.0)
skyModel.setBin('ground', 2)
skyModel2.setZ(63.0)
skyModel2.setDepthWrite(0)
skyModel2.setTransparency(1)
skyModel2.setColorScale(1.0, 1.0, 1.0, 1.0)
skyModel2.setTwoSided(True)
skyModel2.setBin('fixed', 3)
skyModel2.setR(180)
skyModel3.setZ(50.0)
skyModel3.setDepthWrite(0)
skyModel3.setTransparency(1)
skyModel3.setColorScale(1.0, 1.0, 1.0, 1.0)
skyModel3.setTwoSided(True)
skyModel3.setBin('fixed', 4)
self.skyListLow.append(skyModel3)
self.lowSky = skyModel3
self.environModel.setPos(0, -5.0, 0)
self.environModel.flattenMedium()
self.environModel.setScale(15.0, 15.0, 2.0)
self.dropShadowModel = loader.loadModel('phase_3/models/props/drop_shadow')
self.dropShadowModel.setColor(0, 0, 0, 0.5)
self.dropShadowModel.flattenMedium()
self.toonDropShadows = []
self.__textGen = TextNode('targetGame')
self.__textGen.setFont(ToontownGlobals.getSignFont())
self.__textGen.setAlign(TextNode.ACenter)
        self.powerBar = DirectWaitBar(guiId='launch power bar', pos=(0.0, 0, -0.65), relief=DGG.SUNKEN,
                                      frameSize=(-2.0, 2.0, -0.2, 0.2), borderWidth=(0.02, 0.02), scale=0.25,
                                      range=100, sortOrder=50, frameColor=(0.5, 0.5, 0.5, 0.5),
                                      barColor=(1.0, 0.0, 0.0, 1.0), text='0%', text_scale=0.26,
                                      text_fg=(1, 1, 1, 1), text_align=TextNode.ACenter, text_pos=(0, -0.05))
self.power = self.startPower
self.powerBar['value'] = self.power
self.powerBar.hide()
self.rubberBands = []
self.umbrella = loader.loadModel('phase_4/models/minigames/slingshot_game_umbrellas.bam')
pick = random.randint(0, 3)
self.umbrellaColorSelect[pick] = 0
tex = loader.loadTexture(DistributedTargetGame.UMBRELLA_TEXTURE_LIST[pick])
tex.setMinfilter(Texture.FTLinearMipmapLinear)
tex.setMagfilter(Texture.FTLinear)
self.umbrella.setTexture(tex, 1)
open = self.umbrella.find('**/open_umbrella')
open.hide()
open.setPos(0.1, 0.2, -0.1)
open.setHpr(-90.0, 0.0, 15.0)
closed = self.umbrella.find('**/closed_umbrella')
closed.show()
hands = localAvatar.getRightHands()
ce = CompassEffect.make(NodePath(), CompassEffect.PRot)
closed.node().setEffect(ce)
closed.setHpr(0.0, 100.0, 35.0)
for hand in hands:
self.umbrella.instanceTo(hand)
self.remoteUmbrellas = {}
self.addSound('wind1', 'target_cloud.ogg', 'phase_4/audio/sfx/')
self.addSound('trampoline', 'target_trampoline_2.ogg', 'phase_4/audio/sfx/')
self.addSound('launch', 'target_launch.ogg', 'phase_4/audio/sfx/')
self.addSound('miss', 'target_Lose.ogg', 'phase_4/audio/sfx/')
self.addSound('score', 'target_happydance.ogg', 'phase_4/audio/sfx/')
self.addSound('impact', 'target_impact_grunt1.ogg', 'phase_4/audio/sfx/')
self.addSound('umbrella', 'target_chute.ogg', 'phase_4/audio/sfx/')
self.addSound('bounce', 'target_impact_only.ogg', 'phase_4/audio/sfx/')
self.flySound = loader.loadSfx('phase_4/audio/sfx/target_wind_fly_loop.ogg')
self.flySound.setVolume(0.0)
self.flySound.setPlayRate(1.0)
self.flySound.setLoop(True)
self.flySound.play()
self.rubberSound = loader.loadSfx('phase_4/audio/sfx/target_stretching_aim_loop.ogg')
self.rubberSound.setVolume(0.0)
self.rubberSound.setPlayRate(1.0)
self.rubberSound.setLoop(True)
self.rubberSound.play()
self.flutterSound = loader.loadSfx('phase_4/audio/sfx/target_wind_float_clothloop.ogg')
self.flutterSound.setVolume(1.0)
self.flutterSound.setPlayRate(1.0)
self.flutterSound.setLoop(True)
return
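    # Sound-table helpers: addSound caches a sound effect under a short name
    # (remembering the last path passed in), and playSound fires a cached
    # effect at the requested volume.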
def addSound(self, name, soundName, path = None):
if not hasattr(self, 'soundTable'):
self.soundTable = {}
if path:
self.soundPath = path
soundSource = '%s%s' % (self.soundPath, soundName)
self.soundTable[name] = loader.loadSfx(soundSource)
def playSound(self, name, volume = 1.0):
if hasattr(self, 'soundTable'):
self.soundTable[name].setVolume(volume)
self.soundTable[name].play()
def addSkys(self, instance):
low = instance.find('**/skyLow')
mid = instance.find(self.findCloud)
self.skyListLow.append(low)
self.skyListMid.append(mid)
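    # The AI distributes one random seed so every client seeds its RNG
    # identically and lays out the same procedurally-placed targets.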
def setTargetSeed(self, targetSeed, extra = None):
if not self.hasLocalToon:
return
random.seed(targetSeed)
self.pattern = TargetGameGlobals.difficultyPatterns[self.getSafezoneId()]
        self.notify.debug('setTargetSeed: seed=%s' % targetSeed)
self.setupTargets()
def setupTargets(self):
fieldWidth = self.ENVIRON_WIDTH * 3
fieldLength = self.ENVIRON_LENGTH * 3.7
        self.targetSubParts = [2, 2, 2, 1]  # default; replaced below by the difficulty pattern's value
self.targetList = self.pattern[0]
self.targetValue = self.pattern[1]
self.targetSize = self.pattern[2]
self.targetColors = self.pattern[3]
self.targetSubParts = self.pattern[4]
highestValue = 0
for value in self.targetValue:
if value > highestValue:
highestValue = value
placeList = []
self.jumpColor = TargetGameGlobals.colorBlack
self.placeValue = highestValue * 0.5
self.jumpSize = self.pattern[5]
self.jumpNum = self.pattern[6]
self.accept('enterJump', self.jumpIn)
self.accept('exitJump', self.jumpOut)
self.targets = render.attachNewNode('targetGameTargets')
for typeIndex in range(len(self.targetList)):
for targetIndex in range(self.targetList[typeIndex]):
goodPlacement = 0
while not goodPlacement:
placeX = random.random() * (fieldWidth * 0.6) - fieldWidth * 0.6 * 0.5
placeY = (random.random() * 0.6 + (0.0 + 0.4 * (self.placeValue * 1.0 / (highestValue * 1.0)))) * fieldLength
fillSize = self.targetSize[typeIndex]
goodPlacement = checkPlace(placeX, placeY, fillSize, placeList)
placeList.append((placeX, placeY, fillSize))
subIndex = self.targetSubParts[typeIndex]
first = 1
while subIndex:
combinedIndex = typeIndex + subIndex - 1
targetGN = GeomNode('target Circle')
targetNodePathGeom = self.targets.attachNewNode(targetGN)
targetNodePathGeom.setPos(placeX, placeY, 0)
points = int(self.targetSize[combinedIndex] / 2) + 20
order = len(self.targetList) - combinedIndex + 10
if first:
first = 0
geo = addCircle(targetGN, self.targetColors[combinedIndex], points, self.targetSize[combinedIndex], order)
else:
thickness = abs(self.targetSize[combinedIndex] - self.targetSize[combinedIndex + 1]) - 0.0
geo = addRing(targetGN, self.targetColors[combinedIndex], points, self.targetSize[combinedIndex], order, thickness)
targetNodePathGeom.setBin('ground', combinedIndex * 2 + 1)
targetNodePathGeom.setColorScale(self.targetColors[combinedIndex]['Red'], self.targetColors[combinedIndex]['Green'], self.targetColors[combinedIndex]['Blue'], self.targetColors[combinedIndex]['Alpha'])
targetNodePathGeom.setDepthWrite(False)
targetNodePathGeom.setDepthTest(False)
targetNodePathGeom.setTransparency(TransparencyAttrib.MAlpha)
                    self.targetsPlaced.append((placeX, placeY, combinedIndex, geo))
subIndex -= 1
for jump in range(self.jumpNum):
normJumpSize = 6.8
goodPlacement = 0
while not goodPlacement:
placeX = random.random() * (fieldWidth * 0.6) - fieldWidth * 0.6 * 0.5
placeY = (random.random() * 0.6 + (0.0 + 0.4 * (self.placeValue * 1.0 / (highestValue * 1.0)))) * fieldLength
fillSize = self.jumpSize
goodPlacement = checkPlace(placeX, placeY, fillSize, placeList)
placeList.append((placeX, placeY, fillSize))
target = CollisionSphere(0, 0, 0, self.jumpSize)
target.setTangible(0)
targetNode = CollisionNode('Jump')
targetNode.addSolid(target)
targetGN = GeomNode('target jump Circle')
targetNodePathGeom = self.targets.attachNewNode(targetGN)
trampI = self.trampoline.copyTo(self.targets)
trampI.setPos(placeX, placeY, 0.05)
latScale = self.jumpSize / normJumpSize
trampI.setScale(latScale, latScale, 1.0)
targetNodePath = self.targets.attachNewNode(targetNode)
targetNodePath.setPos(placeX, placeY, 0.0)
targetNodePathGeom.setPos(placeX, placeY, 0.05)
points = self.jumpSize / 2 + 20
order = 0
addCircle(targetGN, self.jumpColor, points, self.jumpSize + 0.5, order)
targetNodePathGeom.setColorScale(self.jumpColor['Red'], self.jumpColor['Green'], self.jumpColor['Blue'], 0.25)
targetNodePathGeom.setBin('ground', 20)
targetNodePathGeom.setDepthWrite(True)
targetNodePathGeom.setTransparency(TransparencyAttrib.MAlpha)
cactusCount = 30
        for _ in range(cactusCount):
placeX = random.random() * (fieldWidth * 0.75) - fieldWidth * 0.75 * 0.5
placeY = 5.0 + random.random() * (fieldLength - 5.0)
targetGN = GeomNode('cactus')
cactus = random.choice([self.cactusA, self.cactusB])
            targetNodePathGeom = self.targets.attachNewNode(targetGN)
cactusI = cactus.copyTo(self.targets)
cactusI.setPos(placeX, placeY, 0)
cactusI.setHpr(random.random() * 360, 0, 0)
cactusI.setScale(0.5 + random.random())
def unload(self):
self.notify.debug('unload')
DistributedMinigame.unload(self)
del self.__textGen
del self.toonDropShadows
self.dropShadowModel.removeNode()
del self.dropShadowModel
self.environModel.removeNode()
del self.environModel
self.fogOver.delete()
open = self.umbrella.find('**/open_umbrella')
open.hide()
closed = self.umbrella.find('**/closed_umbrella')
closed.hide()
self.umbrella.removeNode()
del self.umbrella
for umbrella in self.remoteUmbrellas:
self.remoteUmbrellas[umbrella].removeNode()
self.remoteUmbrellas.clear()
del self.remoteUmbrellas
self.flySound.stop()
del self.flySound
self.rubberSound.stop()
del self.rubberSound
self.flutterSound.stop()
del self.flutterSound
del self.music
del self.sndAmbience
self.removeChildGameFSM(self.gameFSM)
del self.gameFSM
if self.targets:
self.targets.removeNode()
del self.targets
self.scoreboard.destroy()
del self.scoreboard
self.powerBar.destroy()
del self.powerBar
self.help.destroy()
del self.help
if self.localTrack:
self.localTrack.finish()
del self.localTrack
def onstage(self):
self.notify.debug('onstage')
DistributedMinigame.onstage(self)
self.arrowKeys = ArrowKeys.ArrowKeys()
toon = base.localAvatar
toon.reparentTo(render)
toon.setAnimState('SitStart', 1.0)
toon.useLOD(self.TOON_LOD)
self.__placeToon(self.localAvId)
self.localStartPos = toon.getPos()
localBand = RubberBand.RubberBand(toon, Point3(0, -1.75, 0), taskPriority=self.UPDATE_RUBBER_BAND_PRIORITY)
tP = self.getToonPlace(self.localAvId)
pos = Point3(tP[0], tP[1] + 10, tP[2])
localBand.setPos(pos)
self.rubberBands.append(localBand)
toon.dropShadow.hide()
toonPos = base.localAvatar.getPos()
camera.reparentTo(render)
toonPos = self.getAvatar(self.localAvId).getPos()
camera.setPosHpr(toonPos[0], toonPos[1] - 18, toonPos[2] + 10, 0, -15, 0)
base.camLens.setMinFov(80/(4./3.))
base.camLens.setFar(self.FOGDISTGROUND)
base.setBackgroundColor(self.AMB_COLOR)
self.__fog = Fog('ringGameFog')
if base.wantFog:
self.__fog.setColor(self.AMB_COLOR)
self.__fog.setLinearRange(0.1, self.FOGDISTGROUND)
render.setFog(self.__fog)
self.environNode = render.attachNewNode('environNode')
self.environBlocks = []
maxI = 4
self.maxDist = (maxI - 1) * self.ENVIRON_LENGTH
for i in range(-1, maxI + 1):
instance = self.environModel.copyTo(self.environNode)
y = self.ENVIRON_LENGTH * i
instance.setY(y)
self.addSkys(instance)
self.environBlocks.append(instance)
jRange = 3
for j in range(0, jRange):
instance = self.environModel.copyTo(self.environNode)
x = self.ENVIRON_WIDTH * (j + 1)
instance.setY(y)
instance.setX(-x)
height = random.random() * 5.0 + 5.0
heading = random.random() * 15.0 - 7.5
self.addSkys(instance)
self.environBlocks.append(instance)
if j == jRange - 2:
mount = instance.find(self.findLeftMount)
mount.setScale(1.0, 1.1, height)
mount.setBin('ground', 1)
mount.show()
sand = mount.find('**/sand_b')
sand.hide()
mount.setPos(mount.getX() + 8.5, 0, 0.0)
if i >= maxI - 1:
mount.setHpr(310, 0, 0)
mount.setScale(1.0, 1.5, height * 1.2)
mount.setPos(mount.getX() - 2.0, 0, 0.0)
if i == maxI:
mount.hide()
if i == -1:
mount.setHpr(50, 0, 0)
mount.setScale(1.0, 1.5, height * 1.2)
mount.setPos(mount.getX() + 5.0, 3.0, 0.0)
if j == jRange - 1:
sand = instance.find(self.findGround)
sand.hide()
for j in range(0, jRange):
instance = self.environModel.copyTo(self.environNode)
x = self.ENVIRON_WIDTH * (j + 1)
instance.setY(y)
instance.setX(x)
height = random.random() * 5.0 + 5.0
heading = random.random() * 15.0 - 7.5
self.addSkys(instance)
self.environBlocks.append(instance)
if j == jRange - 2:
mount = instance.find(self.findRightMount)
mount.setScale(1.0, 1.1, height)
mount.setBin('ground', 1)
mount.show()
sand = mount.find('**/sand_c')
sand.hide()
mount.setPos(mount.getX() - 8.5, 0, 0.0)
if i >= maxI - 1:
mount.setHpr(50, 0, 0)
mount.setScale(1.0, 1.5, height * 1.2)
mount.setPos(mount.getX() + 2.0, 0, 0.0)
if i == maxI:
mount.hide()
if i == -1:
mount.setHpr(310, 0, 0)
mount.setScale(1.0, 1.5, height * 1.2)
mount.setPos(mount.getX() - 5.0, 3.0, 0.0)
if j == jRange - 1:
sand = instance.find(self.findGround)
sand.hide()
self.__addToonDropShadow(self.getAvatar(self.localAvId))
self.__spawnUpdateEnvironTask()
self.__spawnUpdateShadowsTask()
self.__spawnUpdateLocalToonTask()
if self.music:
base.playMusic(self.music, looping=1, volume=0.8)
        if self.sndAmbience is not None:
base.playSfx(self.sndAmbience, looping=1, volume=0.8)
return
def reset(self):
toon = base.localAvatar
toon.setAnimState('SitStart', 1.0)
toon.useLOD(self.TOON_LOD)
self.__placeToon(self.localAvId)
toon.dropShadow.hide()
self.__spawnUpdateLocalToonTask()
toonPos = base.localAvatar.getPos()
camera.setPosHpr(toonPos[0], toonPos[1] - 26, toonPos[2] + 10, 0, -15, 0)
base.camLens.setMinFov(80/(4./3.))
self.resetNums()
def resetNums(self):
self.stretchX = 0
self.stretchY = 0
self.driftX = 0
        self.driftY = 0
self.jumpCount = 0
self.trampBounceCount = 0
self.cameraState = 'Low'
def offstage(self):
self.notify.debug('offstage')
DistributedMinigame.offstage(self)
if self.music:
self.music.stop()
        if self.sndAmbience is not None:
self.sndAmbience.stop()
self.__killUpdateLocalToonTask()
self.__killUpdateShadowsTask()
self.__killUpdateEnvironTask()
self.__killCountDownTask()
del self.soundTable
self.__removeAllToonDropShadows()
render.clearFog()
base.camLens.setFar(ToontownGlobals.DefaultCameraFar)
base.camLens.setMinFov(ToontownGlobals.DefaultCameraFov/(4./3.))
camera.setHpr(0, 90, 0)
base.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
self.arrowKeys.destroy()
del self.arrowKeys
for block in self.environBlocks:
del block
self.environNode.removeNode()
del self.environNode
for avId in self.avIdList:
av = self.getAvatar(avId)
if av:
av.dropShadow.show()
av.resetLOD()
for band in self.rubberBands:
band.delete()
        self.rubberBands = []
self.targetsPlaced = []
def handleDisabledAvatar(self, avId):
self.notify.debug('handleDisabledAvatar')
self.notify.debug('avatar ' + str(avId) + ' disabled')
self.__removeToonDropShadow(self.remoteToons[avId])
DistributedMinigame.handleDisabledAvatar(self, avId)
def __genText(self, text):
self.__textGen.setText(text)
return self.__textGen.generate()
def __placeToon(self, avId):
toon = self.getAvatar(avId)
i = self.avIdList.index(avId)
numToons = float(self.numPlayers)
x = i * self.TOON_INITIAL_SPACING
x -= self.TOON_INITIAL_SPACING * (numToons - 1) / 2.0
pos = self.getToonPlace(avId)
toon.setPosHpr(pos[0], pos[1], pos[2], 0, 0, 0)
def getToonPlace(self, avId):
toon = self.getAvatar(avId)
i = self.avIdList.index(avId)
numToons = float(self.numPlayers)
x = i * self.TOON_INITIAL_SPACING
x -= self.TOON_INITIAL_SPACING * (numToons - 1) / 2.0
pos = Point3(x + self.placeShift, self.TOON_Y, 1.0)
return pos
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
if not self.isSinglePlayer():
base.localAvatar.collisionsOff()
cSphere = CollisionSphere(0.0, 0.0, 0.0, TargetGameGlobals.CollisionRadius)
cSphereNode = CollisionNode('RingGameSphere-%s' % self.localAvId)
cSphereNode.addSolid(cSphere)
cSphereNode.setFromCollideMask(TargetGameGlobals.CollideMask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
self.cSphereNodePath = base.localAvatar.attachNewNode(cSphereNode)
self.pusher = CollisionHandlerPusher()
self.pusher.addCollider(self.cSphereNodePath, base.localAvatar)
self.pusher.setHorizontal(0)
self.cTrav = CollisionTraverser('DistributedRingGame')
self.cTrav.addCollider(self.cSphereNodePath, self.pusher)
self.remoteToonCollNPs = {}
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
if toon:
cSphere = CollisionSphere(0.0, 0.0, 0.0, TargetGameGlobals.CollisionRadius)
cSphereNode = CollisionNode('RingGameSphere-%s' % avId)
cSphereNode.addSolid(cSphere)
cSphereNode.setCollideMask(TargetGameGlobals.CollideMask)
cSphereNP = toon.attachNewNode(cSphereNode)
self.remoteToonCollNPs[avId] = cSphereNP
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
if toon:
toon.reparentTo(render)
self.__placeToon(avId)
toon.setAnimState('SitStart', 1.0)
toon.useLOD(self.TOON_LOD)
toon.dropShadow.hide()
self.__addToonDropShadow(toon)
band = RubberBand.RubberBand(toon, Point3(0, -1.75, 0), taskPriority=self.UPDATE_RUBBER_BAND_PRIORITY)
tP = self.getToonPlace(avId)
pos = Point3(tP[0], tP[1] + 10, tP[2])
band.setPos(pos)
self.rubberBands.append(band)
umbrella = self.umbrella.copyTo(render)
self.remoteUmbrellas[avId] = umbrella
test = 0
kill = 16
while test == 0 and kill > 0:
kill -= 1
pick = random.randint(0, 3)
if self.umbrellaColorSelect[pick]:
self.umbrellaColorSelect[pick] = 0
test = 1
tex = loader.loadTexture(DistributedTargetGame.UMBRELLA_TEXTURE_LIST[pick])
tex.setMinfilter(Texture.FTLinearMipmapLinear)
tex.setMagfilter(Texture.FTLinear)
umbrella.setTexture(tex, 1)
open = umbrella.find('**/open_umbrella')
open.hide()
open.setPos(0.1, 0.2, -0.1)
open.setHpr(-90.0, 180.0, 0.0)
open.setScale(1.5)
closed = umbrella.find('**/closed_umbrella')
closed.show()
hands = toon.getRightHands()
hand = hands[0]
ce = CompassEffect.make(NodePath(), CompassEffect.PRot)
closed.node().setEffect(ce)
closed.setHpr(0.0, 90.0, 35.0)
umbrella.reparentTo(hand)
toon.startSmooth()
self.remoteToons = {}
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
self.remoteToons[avId] = toon
labelIndex = 0
for avId in self.avIdList:
av = self.getAvatar(avId)
self.avLabels[labelIndex]['text'] = av.getName()
labelIndex += 1
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
self.gameFSM.request('prelaunch')
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterPrelaunch(self):
self.defineConstants()
self.resetNums()
self.gameFSM.request('launch')
self.powerBar.show()
self.powerBar['value'] = self.startPower
camera.reparentTo(render)
self.__placeToon(self.localAvId)
toonPos = self.getAvatar(self.localAvId).getPos()
newPos = Point3(toonPos[0], toonPos[1] - 25, toonPos[2] + 7)
newHpr = Point3(0, -15, 0)
self.cameraWork = camera.posHprInterval(2.5, newPos, newHpr, blendType='easeInOut')
self.cameraWork.start()
base.camLens.setMinFov(80/(4./3.))
self.stretchY = self.startPower
self.power = self.startPower
self.scoreboard.hide()
open = self.umbrella.find('**/open_umbrella')
open.hide()
closed = self.umbrella.find('**/closed_umbrella')
closed.show()
self.umbrella.setScale(1.0)
if self.targetMarker:
self.targetMarker.removeNode()
def exitPrelaunch(self):
pass
def enterLaunch(self):
self.__spawnUpdatePowerBarTask()
self.__spawnCountDownTask()
self.help.show()
self.help['text'] = TTLocalizer.TargetGameCountHelp
def exitLaunch(self):
self.cameraWork.finish()
self.__killUpdatePowerBarTask()
self.speedLaunch = self.power
self.powerBar.hide()
self.playSound('launch')
def enterFly(self):
self.notify.debug('enterFly')
self.__spawnCollisionDetectionTask()
self.__placeToon(self.localAvId)
self.speedForward = self.speedLaunch
self.laterial = -self.launchLaterial * 7
self.zVel = self.speedForward * 0.5
toon = base.localAvatar
toon.b_setAnimState('swim', 1.0)
toon.stopBobSwimTask()
toon.dropShadow.hide()
camera.reparentTo(base.localAvatar)
camera.setPosHpr(0, self.CAMERA_Y + self.TOON_Y + 12, 5, 0, -15, 0)
base.camLens.setMinFov(80/(4./3.))
self.help.show()
self.help['text'] = TTLocalizer.TargetGameFlyHelp
self.rubberSound.setVolume(0.0)
self.rubberSound.setPlayRate(1.0)
self.rubberSound.stop()
def exitFly(self):
taskMgr.remove(self.END_GAME_WAIT_TASK)
self.__killCollisionDetectionTask()
self.help.hide()
def enterFall(self):
self.notify.debug('enterLaunch')
toon = base.localAvatar
toon.b_setAnimState('jumpAirborne', 1.0)
toon.dropShadow.hide()
self.speedForward = self.speedForward * 0.5
self.gravity = 4
newHpr = Point3(0, -68, 0)
newPos = Point3(0, self.CAMERA_Y + self.TOON_Y + 15, 15)
camera.posHprInterval(2.5, newPos, newHpr, blendType='easeInOut').start()
open = self.umbrella.find('**/open_umbrella')
open.show()
closed = self.umbrella.find('**/closed_umbrella')
closed.hide()
self.umbrella.setHpr(0, 180.0, -25.0)
self.umbrella.setScale(1.5)
self.help.show()
self.help['text'] = TTLocalizer.TargetGameFallHelp
self.playSound('umbrella')
self.flutterSound.play()
def exitFall(self):
self.flutterSound.stop()
def enterBouncing(self):
self.notify.debug('enterLaunch')
self.gravity = 12.5
open = self.umbrella.find('**/open_umbrella')
open.hide()
closed = self.umbrella.find('**/closed_umbrella')
closed.hide()
self.umbrella.setHpr(0, 0, 0)
self.umbrella.setScale(1.0)
toon = base.localAvatar
toon.b_setAnimState('neutral', 1.0)
toon.dropShadow.hide()
self.help.show()
self.help['text'] = TTLocalizer.TargetGameBounceHelp
def exitBouncing(self):
pass
def enterScore(self):
def count(value, list):
count = 0
for item in list:
if value == item:
count += 1
return count
self.notify.debug('enterScore')
self.__killUpdateLocalToonTask()
self.__spawnGameDoneTask()
self.scoreboard.show()
score = 1
self.sendUpdate('setScore', [base.localAvatar.getX(), base.localAvatar.getY()])
topValue = 0
hitTarget = None
for target in self.targetsPlaced:
radius = self.targetSize[target[2]]
value = self.targetValue[target[2]]
posX = target[0]
posY = target[1]
dx = posX - base.localAvatar.getX()
dy = posY - base.localAvatar.getY()
distance = math.sqrt(dx * dx + dy * dy)
if distance < radius and topValue < value:
topValue = value
hitTarget = target
hitX = posX
hitY = posY
if hitTarget:
targetGN = GeomNode('target Circle')
targetNodePathGeom = self.targets.attachNewNode(targetGN)
targetNodePathGeom.setPos(hitX, hitY, 0)
addRing(targetGN, TargetGameGlobals.colorBlack, 100, self.targetSize[hitTarget[2]] + 0.5, 0)
targetNodePathGeom.setColorScale(TargetGameGlobals.colorWhite['Red'], TargetGameGlobals.colorWhite['Green'], TargetGameGlobals.colorWhite['Blue'], TargetGameGlobals.colorWhite['Alpha'])
targetNodePathGeom.setBin('ground', hitTarget[2] * 2 + 100)
targetNodePathGeom.setDepthWrite(False)
targetNodePathGeom.setDepthTest(False)
targetNodePathGeom.setTransparency(TransparencyAttrib.MAlpha)
self.targetMarker = targetNodePathGeom
score = topValue
self.localTrack = Sequence()
if self.hitInfo == 'OnButt':
self.localTrack.append(ActorInterval(base.localAvatar, 'slip-forward', startFrame=25, endFrame=55))
self.hitInfo = 'GettingUp'
else:
self.localTrack.append(ActorInterval(base.localAvatar, 'slip-forward', startFrame=50, endFrame=55))
self.localTrack.append(Wait(0.5))
if score <= 1:
self.localTrack.append(Parallel(Func(base.localAvatar.b_setAnimState, 'Sad', 1.0), Func(self.playSound, 'miss')))
else:
self.localTrack.append(Parallel(Func(base.localAvatar.b_setAnimState, 'victory', 1.0), Func(self.playSound, 'score')))
newHpr = Point3(180, 10, 0)
newPos = Point3(0, -(self.CAMERA_Y + self.TOON_Y + 12), 1)
camera.posHprInterval(5.0, newPos, newHpr, blendType='easeInOut').start()
self.help.hide()
self.localTrack.start()
return
def exitScore(self):
pass
def enterCleanup(self):
self.notify.debug('enterCleanup')
if not self.isSinglePlayer():
for np in list(self.remoteToonCollNPs.values()):
np.removeNode()
del self.remoteToonCollNPs
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
del self.pusher
del self.cTrav
base.localAvatar.collisionsOn()
def exitCleanup(self):
pass
def signalDone(self):
self.sendUpdate('setPlayerDone', [])
def __spawnUpdatePowerBarTask(self):
taskMgr.remove(self.UPDATE_POWERBAR_TASK)
taskMgr.add(self.__updatePowerBarTask, self.UPDATE_POWERBAR_TASK, priority=self.UPDATE_POWER_BAR_PRIORITY)
self.rubberSound.play()
self.bandVolume = 0.0
def __killUpdatePowerBarTask(self):
taskMgr.remove(self.UPDATE_POWERBAR_TASK)
taskMgr.remove(self.TOONSTRETCHTASK)
def __spawnCountDownTask(self):
self.countDown = 10
self.signalLaunch = 0
taskMgr.remove(self.UPDATE_COUNTDOWN_TASK)
self.doCountDown()
def __killCountDownTask(self):
taskMgr.remove(self.UPDATE_COUNTDOWN_TASK)
def __spawnGameDoneTask(self):
taskMgr.remove(self.GAME_DONE_TASK)
taskMgr.doMethodLater(5.0, self.__gameDone, self.GAME_DONE_TASK)
def __killUpdateGameDoneTask(self):
taskMgr.remove(self.GAME_DONE_TASK)
def __spawnUpdateLocalToonTask(self):
self.__initPosBroadcast()
taskMgr.remove(self.UPDATE_LOCALTOON_TASK)
taskMgr.add(self.__updateLocalToonTask, self.UPDATE_LOCALTOON_TASK, priority=self.UPDATE_LOCAL_TOON_PRIORITY)
def __killUpdateLocalToonTask(self):
taskMgr.remove(self.UPDATE_LOCALTOON_TASK)
def doCountDown(self, task = None):
if self.countDown < 0:
self.signalLaunch = 1
else:
self.powerBar['text'] = TTLocalizer.TargetGameCountdown % self.countDown
self.countDown -= 1
taskMgr.doMethodLater(1.0, self.doCountDown, self.UPDATE_COUNTDOWN_TASK)
def __initPosBroadcast(self):
self.__posBroadcastPeriod = 0.2
self.__timeSinceLastPosBroadcast = 0.0
self.__lastPosBroadcast = self.getAvatar(self.localAvId).getPos()
self.__storeStop = 0
lt = self.getAvatar(self.localAvId)
lt.d_clearSmoothing()
lt.sendCurrentPosition()
def __posBroadcast(self, dt):
self.__timeSinceLastPosBroadcast += dt
if self.__timeSinceLastPosBroadcast > self.__posBroadcastPeriod:
self.__timeSinceLastPosBroadcast -= self.__posBroadcastPeriod
self.getAvatar(self.localAvId).cnode.broadcastPosHprFull()
def __spawnUpdateEnvironTask(self):
taskMgr.remove(self.UPDATE_ENVIRON_TASK)
taskMgr.add(self.__updateEnvironTask, self.UPDATE_ENVIRON_TASK)
def __killUpdateEnvironTask(self):
taskMgr.remove(self.UPDATE_ENVIRON_TASK)
def __updateEnvironTask(self, task):
        # Short-circuit: the environment-scrolling update below is disabled
        # and never runs; the code is kept for reference.
        return
dt = globalClock.getDt()
self.speedForward = self.speedForward - self.speedForward * self.airResistance * dt
t = globalClock.getFrameTime() - self.__timeBase
self.distance = self.distance + dt * self.speedForward
distance = self.distance
distance %= self.ENVIRON_LENGTH
self.environNode.setY(-distance)
return Task.cont
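    # Launch power is pumped by alternating the left/right arrow keys: each
    # fresh press adds power scaled by how quickly it followed the previous
    # one, while a 0.1s ticker steadily bleeds power away. When the player
    # stops pressing (or the countdown forces a launch), power is clamped to
    # [40, 120] and the FSM moves to 'fly'.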
def __updatePowerBarTask(self, task):
powerUp = 0
timeDiff = None
if not self.arrowKeys.rightPressed():
self.canPressRight = 1
elif self.arrowKeys.rightPressed() and self.canPressRight:
powerUp = 1
self.canPressRight = 0
if not self.arrowKeys.leftPressed():
self.canPressLeft = 1
elif self.arrowKeys.leftPressed() and self.canPressLeft:
powerUp = 1
self.canPressLeft = 0
if self.lastPressTime:
timeDiff = globalClock.getFrameTime() - self.lastPressTime
if powerUp and not self.lastPressTime:
self.lastPressTime = globalClock.getFrameTime()
self.ticker = 0.0
elif powerUp:
splitTime = abs(timeDiff)
if splitTime < 0.15:
splitTime = 0.15
self.power += 0.6 / abs(timeDiff) + 5.0
self.lastPressTime = globalClock.getFrameTime()
self.powerBar['value'] = self.power
if self.signalLaunch:
self.lastPressTime = 1
self.ticker = 1
timeDiff = 100000
if self.lastPressTime:
self.ticker += globalClock.getDt()
if self.ticker >= 0.1:
self.ticker = 0.0
powerDiv = 0.05
self.power -= 1.0 + 0.2 * (self.power * powerDiv * (self.power * powerDiv))
        if timeDiff is None:
timeDiff = 0
if timeDiff > 0.5:
self.power = self.powerBar['value']
self.signalLaunch = 0
if self.power > 120:
self.power = 120
if self.power < 40:
self.power = 40
self.gameFSM.request('fly')
self.stretchY = 0.9 * self.stretchY + 0.1 * self.power
self.stretchX = 0.9 * self.stretchX + 0.1 * self.launchLaterial
base.localAvatar.setY(self.localStartPos[1] - self.stretchY * 0.12)
base.localAvatar.setX(self.localStartPos[0] + self.stretchX)
dt = globalClock.getDt()
self.__posBroadcast(dt)
stretchDiff = abs(self.stretchY - self.power) * 0.2
self.bandVolume += stretchDiff
self.bandVolume -= globalClock.getDt()
self.rubberSound.setVolume(self.bandVolume)
self.rubberSound.setPlayRate(1.0 + self.stretchY / 120.0)
return task.cont
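    # Per-frame flight model for the local toon: forward speed decays with air
    # (or open-umbrella) resistance, lift comes from speed above the stall
    # speed, and gravity integrates into zVel. Landing on a trampoline or the
    # ground rebounds the toon with damping until it settles and the FSM
    # advances to 'score'. Cloud-layer transparency, fog, and the remote
    # toons' umbrella poses are also updated here.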
def __updateLocalToonTask(self, task):
stateName = None
if self.gameFSM.getCurrentState():
stateName = self.gameFSM.getCurrentState().getName()
toonPos = self.getAvatar(self.localAvId).getPos()
dt = globalClock.getDt()
if stateName == 'fall':
self.speedForward = self.speedForward - self.speedForward * self.umbrellaResistance * dt
self.laterial = self.laterial - self.laterial * self.umbrellaResistance * dt
if toonPos[2] > 350.0:
diff = toonPos[2] - 350.0
self.speedForward += -diff * dt * 0.05
else:
self.speedForward = self.speedForward - self.speedForward * self.airResistance * dt
self.laterial = self.laterial - self.laterial * self.airResistance * dt
t = globalClock.getFrameTime() - self.__timeBase
self.distance = self.distance + dt * self.speedForward
if self.distance > self.maxDist:
self.distance = self.maxDist
if stateName in ['fall', 'bouncing']:
if self.distance < 15.0:
self.distance = 15.0
pos = [toonPos[0], self.distance, toonPos[2]]
if stateName in ['fly', 'fall']:
pos[0] += dt * self.laterial
liftMult = 0
if stateName == 'fly':
if self.arrowKeys.upPressed():
pass
if self.arrowKeys.downPressed():
self.gameFSM.request('fall')
elif stateName == 'fall':
if self.driftX > self.TOONXZ_SPEED:
self.driftX = self.TOONXZ_SPEED
if self.driftX < -self.TOONXZ_SPEED:
self.driftX = -self.TOONXZ_SPEED
if self.driftY > self.TOONXZ_SPEED:
self.driftY = self.TOONXZ_SPEED
if self.driftY < -self.TOONXZ_SPEED:
self.driftY = -self.TOONXZ_SPEED
if self.arrowKeys.rightPressed():
self.driftX += self.TOONXZ_SPEED * dt
if self.arrowKeys.leftPressed():
self.driftX -= self.TOONXZ_SPEED * dt
if self.arrowKeys.upPressed():
self.driftY += self.TOONXZ_SPEED * dt
if self.arrowKeys.downPressed():
self.driftY -= self.TOONXZ_SPEED * dt
pos[0] += self.driftX * dt
pos[1] += self.driftY * dt
self.distance = pos[1]
liftMult = 1
elif stateName == 'bouncing':
pos[0] += self.driftX * dt
pos[1] += self.driftY * dt
self.distance = pos[1]
else:
xVel = 0.0
if self.arrowKeys.leftPressed():
xVel -= self.TOONXZ_SPEED
if self.arrowKeys.rightPressed():
xVel += self.TOONXZ_SPEED
self.launchLaterial += xVel * dt
if self.launchLaterial < -TargetGameGlobals.MAX_LAT:
self.launchLaterial = -TargetGameGlobals.MAX_LAT
if self.launchLaterial > TargetGameGlobals.MAX_LAT:
self.launchLaterial = TargetGameGlobals.MAX_LAT
pos[0] = self.getToonPlace(self.localAvId)[0] - self.launchLaterial
lift = (self.speedForward - self.speedStall) / self.speedStall * liftMult
onScreenDebug.removeAllWithPrefix(self.getDisplayPrefix())
onScreenDebug.add('%s Vel' % self.getDisplayPrefix(), self.zVel)
onScreenDebug.add('%s Lift' % self.getDisplayPrefix(), lift)
onScreenDebug.add('%s Gravity' % self.getDisplayPrefix(), self.gravity)
onScreenDebug.add('%s Pos' % self.getDisplayPrefix(), pos[2])
onScreenDebug.add('%s Drifty' % self.getDisplayPrefix(), self.driftY)
if lift < 0:
lift = 0
upforce = lift * 10.0 - self.gravity
self.zVel += (lift - self.gravity) * dt
pos[2] += self.zVel * dt
if pos[2] < self.trampZ:
if self.jumpCount:
self.playSound('trampoline')
rebound = 0.75
if self.trampBounceCount >= 1:
rebound = 0.5
self.gameFSM.request('bouncing')
pos[2] = self.trampZ
if self.zVel != 0:
signZ = self.zVel / abs(self.zVel)
else:
signZ = 1
self.zVel = -(self.zVel * rebound - signZ * 1.0)
self.trampBounceCount += 1
if abs(self.zVel) < 1.0:
self.zVel = 0
toon = base.localAvatar
if stateName in ['fly', 'fall', 'bouncing']:
toon.b_setAnimState('neutral', 1.0)
toon.dropShadow.hide()
self.gameFSM.request('score')
elif pos[2] < 0.1:
toon = base.localAvatar
if stateName == 'fall' or stateName == 'fly':
self.gameFSM.request('bouncing')
if stateName == 'fly':
ivolume = math.sqrt(abs(self.zVel) / 40.0)
self.hitInfo = 'OnButt'
self.localTrack = ActorInterval(toon, 'slip-forward', startFrame=12, endFrame=24)
self.localTrack.start()
self.playSound('impact', ivolume)
self.playSound('bounce', ivolume)
elif stateName == 'fall' or stateName == 'bouncing':
ivolume = math.sqrt(abs(self.zVel) / 40.0)
if ivolume > 0.4:
if self.hitInfo == 'OnButt':
self.localTrack = ActorInterval(toon, 'slip-forward', startFrame=13, endFrame=24)
self.localTrack.start()
else:
self.localTrack = ActorInterval(toon, 'jump-land', startFrame=6, endFrame=21)
self.localTrack.start()
self.playSound('bounce', ivolume)
pos[2] = 0.1
rebound = 0.5
self.speedForward = self.speedForward * rebound
self.driftX = self.driftX * rebound
self.driftY = self.driftY * rebound
toon = base.localAvatar
if self.zVel != 0:
signZ = self.zVel / abs(self.zVel)
else:
signZ = 1
self.zVel = -(self.zVel * rebound - signZ * 1.0)
if abs(self.zVel) < 1.0:
self.zVel = 0
if stateName == 'fly' or stateName == 'bouncing':
self.gameFSM.request('score')
if pos[0] > TargetGameGlobals.MAX_FIELD_SPAN:
pos[0] = TargetGameGlobals.MAX_FIELD_SPAN
self.laterial = 0
if pos[0] < -TargetGameGlobals.MAX_FIELD_SPAN:
pos[0] = -TargetGameGlobals.MAX_FIELD_SPAN
self.laterial = 0
self.getAvatar(self.localAvId).setPos(pos[0], pos[1], pos[2])
if hasattr(self, 'cTrav'):
self.cTrav.traverse(render)
if stateName in ['fly', 'fall', 'bouncing']:
self.__posBroadcast(dt)
visZ = camera.getZ(render)
lowHeight = 100
lowRange = 20
opacityLow = 1
baseOpLow = 0.6
baseTransLow = 1.0 - baseOpLow
if visZ < lowHeight - lowRange or visZ > lowHeight + lowRange:
transLow = 1.0
else:
dif = abs(visZ - lowHeight)
transLow = dif / lowRange
for item in self.skyListLow:
item.setColorScale(1.0, 1.0, 1.0, transLow * baseOpLow)
midHeight = 126
midRange = 20
opacityMid = 1
baseOpMid = 1.0
baseTransMid = 1.0 - baseOpMid
if visZ < midHeight - midRange or visZ > midHeight + midRange:
transMid = 1.0
else:
dif = abs(visZ - midHeight)
transMid = dif / midRange
for item in self.skyListMid:
item.setColorScale(1.0, 1.0, 1.0, transMid * baseOpMid)
minTrans = min(1.0, transMid, transLow)
fogVis = 1.0 - minTrans
fogColor = self.AMB_COLOR * minTrans + self.CLOUD_COLOR * fogVis
fogRange = minTrans * self.FOGDISTGROUND + fogVis * self.FOGDISTCLOUD
newCamState = 'Low'
if visZ > midHeight:
newCamState = 'High'
if self.cameraState != newCamState:
self.cameraState = newCamState
self.cloudRendering('High')
elif visZ > lowHeight:
newCamState = 'Mid'
if self.cameraState != newCamState:
if self.cameraState == 'Low':
self.playSound('wind1')
self.cameraState = newCamState
self.cloudRendering('Mid')
else:
newCamState = 'Low'
if self.cameraState != newCamState:
self.cameraState = newCamState
self.cloudRendering('Low')
overlayOp = 0.5
self.fogOver.setOpacity(fogVis * overlayOp)
vol = (self.speedForward + abs(self.zVel)) / 120.0
self.flySound.setVolume(vol)
self.flySound.setPlayRate(1.0 + abs(self.zVel) * 0.005)
if self.updateTick % 5 == 0:
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
if toon:
state = toon.animFSM.getCurrentState().getName()
umbrella = self.remoteUmbrellas.get(avId)
if umbrella:
if state == 'Sit':
open = umbrella.find('**/open_umbrella')
open.hide()
closed = umbrella.find('**/closed_umbrella')
closed.show()
elif state == 'swim':
open = umbrella.find('**/open_umbrella')
open.hide()
closed = umbrella.find('**/closed_umbrella')
closed.show()
elif state == 'jumpAirborne':
open = umbrella.find('**/open_umbrella')
open.show()
closed = umbrella.find('**/closed_umbrella')
closed.hide()
elif state == 'neutral':
open = umbrella.find('**/open_umbrella')
open.hide()
closed = umbrella.find('**/closed_umbrella')
closed.hide()
else:
open = umbrella.find('**/open_umbrella')
open.hide()
closed = umbrella.find('**/closed_umbrella')
closed.hide()
self.updateTick += 1
return Task.cont
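    # Adjusts the fixed-bin draw order of the two cloud layers based on camera
    # height so the layer nearest the camera is drawn last (bin 4); at mid
    # heights both layers share the same bin.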
def cloudRendering(self, layer):
if layer == 'Low':
for item in self.skyListMid:
item.setBin('fixed', 3)
for item in self.skyListLow:
item.setBin('fixed', 4)
elif layer == 'Mid':
for item in self.skyListMid:
item.setBin('fixed', 3)
for item in self.skyListLow:
item.setBin('fixed', 3)
elif layer == 'High':
for item in self.skyListMid:
item.setBin('fixed', 4)
for item in self.skyListLow:
item.setBin('fixed', 3)
    def __addDropShadow_INTERNAL(self, parent, scale_x, scale_y, scale_z, shadowList):
        shadow = self.dropShadowModel.copyTo(render)
        shadow.setPos(0, self.CAMERA_Y, -100)
        shadow.setBin('shadow', 100)
        shadow.setDepthWrite(False)
        shadow.setDepthTest(True)
        shadow.setScale(scale_x, scale_y, scale_z)
        shadowList.append([shadow, parent])
    def __removeDropShadow_INTERNAL(self, parent, shadowList):
        for i in range(len(shadowList)):
            entry = shadowList[i]
            if entry[1] == parent:
                entry[0].removeNode()
                shadowList.pop(i)
                return
        self.notify.warning('parent object ' + str(parent) + ' not found in drop shadow list!')
    def __addToonDropShadow(self, toon):
        self.__addDropShadow_INTERNAL(toon, 0.5, 0.5, 0.5, self.toonDropShadows)
    def __removeToonDropShadow(self, toon):
        self.__removeDropShadow_INTERNAL(toon, self.toonDropShadows)
def __removeAllToonDropShadows(self):
for entry in self.toonDropShadows:
entry[0].removeNode()
self.toonDropShadows = []
def __spawnUpdateShadowsTask(self):
taskMgr.remove(self.UPDATE_SHADOWS_TASK)
taskMgr.add(self.__updateShadowsTask, self.UPDATE_SHADOWS_TASK, priority=self.UPDATE_SHADOWS_PRIORITY)
def __killUpdateShadowsTask(self):
taskMgr.remove(self.UPDATE_SHADOWS_TASK)
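    # Drop shadows are tracked as [shadowNode, parent] pairs; each frame the
    # shadow snaps under its parent at ground height (or trampoline height
    # while the local toon is over a jump pad).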
def __updateShadowsTask(self, task):
        for entry in self.toonDropShadows:
            shadowParent = entry[1]
            y = shadowParent.getY(render)
            x = shadowParent.getX(render)
            if self.jumpCount and shadowParent == base.localAvatar:
z = self.trampZ + self.SHADOW_Z_OFFSET
else:
z = 0.0 + self.SHADOW_Z_OFFSET
shadow = entry[0]
shadow.setPos(x, y, z)
return Task.cont
def __spawnCollisionDetectionTask(self):
self.__ringGroupsPassed = 0
taskMgr.remove(self.COLLISION_DETECTION_TASK)
taskMgr.add(self.__collisionDetectionTask, self.COLLISION_DETECTION_TASK, priority=self.COLLISION_DETECTION_PRIORITY)
def __killCollisionDetectionTask(self):
taskMgr.remove(self.COLLISION_DETECTION_TASK)
def __collisionDetectionTask(self, task):
return Task.cont
def __endGameDolater(self, task):
self.gameOver()
return Task.done
def setTimeBase(self, timestamp):
if not self.hasLocalToon:
return
self.__timeBase = globalClockDelta.networkToLocalTime(timestamp)
def setColorIndices(self, a, b, c, d):
if not self.hasLocalToon:
return
self.colorIndices = [a,
b,
c,
d]
def getDisplayPrefix(self):
return 'TargetGame'
def __gameDone(self, task = None):
self.signalDone()
        return Task.done
def setGameDone(self, extra = None):
self.gameOver()
def setRoundDone(self):
if not self.hasLocalToon:
return
self.round += 1
self.roundLabel['text'] = TTLocalizer.TargetGameBoard % self.round
self.reset()
self.gameFSM.request('prelaunch')
def setSingleScore(self, score, avId):
if not self.hasLocalToon:
return
for existIndex in range(len(self.avIdList)):
if self.avIdList[existIndex] == avId:
self.scoreLabels[existIndex]['text'] = '%s' % score
def jumpIn(self, colEntry):
self.jumpCount += 1
def jumpOut(self, colEntry):
self.jumpCount -= 1
if self.jumpCount < 0:
self.jumpCount = 0
```
#### File: toontown/minigame/DivingFishSpawn.py
```python
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
from direct.actor import Actor
import random
from . import DivingGameGlobals
class DivingFishSpawn(DirectObject):
RADIUS = 0.7
def __init__(self, spawnId, direction, position, cHandler):
loadBase = 'phase_4/models/char/'
self.entryNode = render.attachNewNode('entryNode')
self.direction = direction
self.fishArray = {}
self.inactiveArray = []
self.fishActive = 0
self.position = position
self.spawnId = spawnId
self.id = -1
def getUniqueNumber(self):
self.id += 1
return self.id
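    # Builds one fish Actor from its type code: loads the model and swim
    # animation, orients it along the spawn direction, and attaches an
    # intangible collision sphere named 'fc<code>' whose into-collisions
    # are forwarded as the 'FishHit' event.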
def createFish(self, fishcode):
loadBase = 'phase_4/models/char/'
        if fishcode == 0:
fish = Actor.Actor('phase_4/models/char/clownFish-zero.bam', {'anim': loadBase + 'clownFish-swim.bam'})
fish.name = 'clown'
        elif fishcode == 1:
fish = Actor.Actor('phase_4/models/char/PBJfish-zero.bam', {'anim': 'phase_4/models/char/PBJfish-swim.bam'})
fish.name = 'pbj'
        elif fishcode == 2:
fish = Actor.Actor('phase_4/models/char/BearAcuda-zero.bam', {'anim': 'phase_4/models/char/BearAcuda-swim.bam'})
fish.name = 'bear'
        elif fishcode == 3:
fish = Actor.Actor(loadBase + 'balloonFish-zero.bam', {'anim': loadBase + 'balloonFish-swim.bam'})
fish.name = 'balloon'
        elif fishcode == 4:
fish = Actor.Actor(loadBase + 'nurseShark-zero.bam', {'anim': loadBase + 'nurseShark-swim.bam'})
fish.name = 'nurse'
        elif fishcode == 5:
fish = Actor.Actor(loadBase + 'pianoTuna-zero.bam', {'anim': loadBase + 'pianoTuna-swim.bam'})
fish.name = 'piano'
else:
return
fish.active = 1
fish.direction = self.direction
idCode = self.getUniqueNumber()
fish.code = str(self.spawnId) + str(idCode)
self.fishArray[idCode] = fish
fish.reparentTo(render)
fish.setScale(1)
fish.moveLerp = Sequence()
if fish.name == 'clown':
fish.setH(90 * self.direction)
fish.loop('anim')
cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.2)
elif fish.name == 'pbj':
fish.setH(15 * self.direction)
fish.loop('anim')
cSphere = CollisionSphere(0.0, 0.0, 0.0, 1)
elif fish.name == 'balloon':
fish.setH(0)
fish.loop('anim', fromFrame=0, toFrame=94)
fish.setScale(2)
cSphere = CollisionSphere(0.0, 0.0, 0.0, 0.2)
elif fish.name == 'bear':
fish.setH(90 * self.direction)
cSphere = CollisionSphere(0.0, -1.0, 3.5, 3.0)
fish.loop('anim')
fish.setScale(0.4, 1.7, 1.7)
elif fish.name == 'nurse':
fish.setH(90 * self.direction)
cSphere = CollisionSphere(0.0, -1.0, 0.0, 1)
fish.setScale(0.5, 1.7, 1.7)
fish.loop('anim')
        elif fish.name == 'mackerel':  # unreachable here: no fishcode above assigns the name 'mackerel'
fish.setH(90 * self.direction)
cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.5)
fish.loop('anim', fromFrame=36, toFrame=80)
elif fish.name == 'piano':
fish.loop('anim')
fish.setScale(1.4)
cSphere = CollisionSphere(0, 0, 0, 1)
fishSoundName = 'Piano_Tuna.ogg'
            if self.direction == -1:
fish.setH(0)
else:
fish.setH(180)
cSphere.setTangible(0)
fish.offset = 0
cSphereNode = CollisionNode('fc' + str(fish.code))
cSphereNode.addSolid(cSphere)
cSphereNode.setFromCollideMask(BitMask32.allOff())
cSphereNode.setIntoCollideMask(DivingGameGlobals.CollideMask)
cSphereNodePath = fish.attachNewNode(cSphereNode)
self.accept('into-' + 'fc' + str(fish.code), self.__handleFishCollide)
fish.moveloop = Sequence(Wait(4), LerpScaleInterval(fish, startScale=1, scale=3, duration=1), Wait(1.5), LerpScaleInterval(fish, startScale=3, scale=1, duration=0.5))
return fish
def destroy(self):
self.ignoreAll()
for fish in list(self.fishArray.values()):
fish.moveLerp.pause()
            if hasattr(fish, 'specialLerp'):  # specialLerp may be attached to a fish externally
                fish.specialLerp.finish()
if hasattr(fish, 'sound'):
fish.sound.stop()
fish.sound = None
fish.moveLerp = None
fish.specialLerp = None
fish.removeNode()
del fish
return
def __handleFishCollide(self, collEntry):
messenger.send('FishHit', [collEntry])
```
#### File: toontown/nametag/NametagGroup.py
```python
from direct.task.Task import Task
from pandac.PandaModules import VBase4, PandaNode
from toontown.margins.MarginVisible import MarginVisible
from toontown.nametag import NametagGlobals
from toontown.nametag.Nametag2d import Nametag2d
from toontown.nametag.Nametag3d import Nametag3d
class NametagGroup:
CHAT_TIMEOUT_MIN = 4.0
CHAT_TIMEOUT_MAX = 12.0
CHAT_STOMP_DELAY = 0.2
def __init__(self):
self.avatar = None
self.active = True
self.objectCode = None
self.chatButton = NametagGlobals.noButton
self.chatReversed = False
self.font = None
self.chatFont = None
self.shadow = None
self.marginManager = None
self.visible3d = True
self.chatType = NametagGlobals.CHAT
self.chatBalloonType = NametagGlobals.CHAT_BALLOON
self.nametagColor = NametagGlobals.NametagColors[NametagGlobals.CCNormal]
self.chatColor = NametagGlobals.ChatColors[NametagGlobals.CCNormal]
self.speedChatColor = VBase4(1, 1, 1, 1)
self.wordWrap = 8
self.chatWordWrap = 12
self.text = ''
self.chatPages = []
self.chatPageIndex = 0
self.chatTimeoutTask = None
self.chatTimeoutTaskName = self.getUniqueName() + '-timeout'
self.stompChatText = ''
self.stompTask = None
self.stompTaskName = self.getUniqueName() + '-stomp'
self.icon = PandaNode('icon')
self.nametag2d = Nametag2d()
self.nametag3d = Nametag3d()
self.nametags = set()
self.add(self.nametag2d)
self.add(self.nametag3d)
# Add the tick task:
self.tickTaskName = self.getUniqueName() + '-tick'
self.tickTask = taskMgr.add(self.tick, self.tickTaskName, sort=45)
def destroy(self):
if self.marginManager is not None:
self.unmanage(self.marginManager)
if self.tickTask is not None:
taskMgr.remove(self.tickTask)
self.tickTask = None
self.clearChatText()
for nametag in list(self.nametags):
self.remove(nametag)
self.nametag2d = None
self.nametag3d = None
if self.icon is not None:
self.icon.removeAllChildren()
self.icon = None
self.chatFont = None
self.font = None
self.chatButton = NametagGlobals.noButton
self.avatar = None
def getUniqueName(self):
return 'NametagGroup-' + str(id(self))
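    # Runs every frame: decides whether the in-world 3D nametag or the 2D
    # margin nametag should be shown, based on chat state, the global nametag
    # settings, and whether the avatar is in view of the camera, then toggles
    # the 2D tag whenever the answer changes.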
def tick(self, task):
if (self.avatar is None) or (self.avatar.isEmpty()):
return Task.cont
chatText = self.getChatText()
if (NametagGlobals.forceOnscreenChat and
chatText and
self.chatBalloonType == NametagGlobals.CHAT_BALLOON):
visible3d = False
elif self.avatar == NametagGlobals.me:
if (chatText and
self.chatBalloonType == NametagGlobals.CHAT_BALLOON and
not base.cam.node().isInView(self.avatar.getPos(base.cam))):
visible3d = False
else:
visible3d = True
elif NametagGlobals.force2dNametags:
visible3d = False
elif (not NametagGlobals.want2dNametags and
((not chatText) or (self.chatBalloonType != NametagGlobals.CHAT_BALLOON))):
visible3d = True
elif self.avatar.isHidden():
visible3d = False
else:
visible3d = base.cam.node().isInView(self.avatar.getPos(base.cam))
if visible3d != self.visible3d:
self.visible3d = visible3d
if self.nametag2d is not None:
self.nametag2d.setVisible(not visible3d)
return Task.cont
def setAvatar(self, avatar):
self.avatar = avatar
for nametag in self.nametags:
nametag.setAvatar(self.avatar)
def getAvatar(self):
return self.avatar
def setActive(self, active):
self.active = active
for nametag in self.nametags:
nametag.setActive(self.active)
def getActive(self):
return self.active
def setObjectCode(self, objectCode):
self.objectCode = objectCode
def getObjectCode(self):
return self.objectCode
def setChatButton(self, chatButton):
self.chatButton = chatButton
for nametag in self.nametags:
nametag.setChatButton(self.chatButton)
def getChatButton(self):
return self.chatButton
def hasChatButton(self):
return self.chatButton != NametagGlobals.noButton
def setChatReversed(self, reversed):
self.chatReversed = reversed
for nametag in self.nametags:
nametag.setChatReversed(reversed)
def getChatReversed(self):
return self.chatReversed
def setFont(self, font):
self.font = font
for nametag in self.nametags:
nametag.setFont(self.font)
def getFont(self):
return self.font
def setChatFont(self, chatFont):
self.chatFont = chatFont
for nametag in self.nametags:
nametag.setChatFont(self.chatFont)
def getChatFont(self):
return self.chatFont
def setShadow(self, shadow):
self.shadow = shadow
for nametag in self.nametags:
nametag.setShadow(self.shadow)
def getShadow(self):
return self.shadow
def clearShadow(self):
self.shadow = None
for nametag in self.nametags:
nametag.clearShadow()
def setChatType(self, chatType):
self.chatType = chatType
for nametag in self.nametags:
nametag.setChatType(self.chatType)
def getChatType(self):
return self.chatType
def setChatBalloonType(self, chatBalloonType):
self.chatBalloonType = chatBalloonType
for nametag in self.nametags:
nametag.setChatBalloonType(self.chatBalloonType)
def getChatBalloonType(self):
return self.chatBalloonType
def setNametagColor(self, nametagColor):
self.nametagColor = nametagColor
for nametag in self.nametags:
nametag.setNametagColor(self.nametagColor)
def getNametagColor(self):
return self.nametagColor
def setChatColor(self, chatColor):
self.chatColor = chatColor
for nametag in self.nametags:
nametag.setChatColor(self.chatColor)
def getChatColor(self):
return self.chatColor
def setSpeedChatColor(self, speedChatColor):
self.speedChatColor = speedChatColor
for nametag in self.nametags:
nametag.setSpeedChatColor(self.speedChatColor)
def getSpeedChatColor(self):
return self.speedChatColor
def setWordWrap(self, wordWrap):
self.wordWrap = wordWrap
for nametag in self.nametags:
nametag.setWordWrap(self.wordWrap)
def getWordWrap(self):
return self.wordWrap
def setChatWordWrap(self, chatWordWrap):
self.chatWordWrap = chatWordWrap
for nametag in self.nametags:
nametag.setChatWordWrap(self.chatWordWrap)
def getChatWordWrap(self):
return self.chatWordWrap
def setText(self, text):
self.text = text
if isinstance(self.text, bytes):
self.text = self.text.decode()
for nametag in self.nametags:
nametag.setText(self.text)
nametag.update()
def getText(self):
return self.text
def getNumChatPages(self):
return len(self.chatPages)
def setChatPageIndex(self, chatPageIndex):
if chatPageIndex >= self.getNumChatPages():
return
self.chatPageIndex = chatPageIndex
for nametag in self.nametags:
nametag.setChatText(self.chatPages[self.chatPageIndex])
nametag.update()
def getChatPageIndex(self):
return self.chatPageIndex
def setChatText(self, chatText, timeout=False):
# If we are currently displaying chat text, we need to "stomp" it. In
# other words, we need to clear the current chat text, pause for a
# brief moment, and then display the new chat text:
if self.getChatText():
self.clearChatText()
self.stompChatText = chatText
self.stompTask = taskMgr.doMethodLater(
self.CHAT_STOMP_DELAY, self.__chatStomp, self.stompTaskName,
extraArgs=[timeout])
return
self.clearChatText()
self.chatPages = chatText.split('\x07')
self.setChatPageIndex(0)
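# When a timeout is requested, the display time scales with the text
# length (0.5s per character), clamped to the class constants
# CHAT_TIMEOUT_MIN/CHAT_TIMEOUT_MAX.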
if timeout:
delay = len(self.getChatText()) * 0.5
if delay < self.CHAT_TIMEOUT_MIN:
delay = self.CHAT_TIMEOUT_MIN
elif delay > self.CHAT_TIMEOUT_MAX:
delay = self.CHAT_TIMEOUT_MAX
self.chatTimeoutTask = taskMgr.doMethodLater(
delay, self.clearChatText, self.chatTimeoutTaskName)
def getChatText(self):
if self.chatPageIndex >= self.getNumChatPages():
return ''
return self.chatPages[self.chatPageIndex]
def clearChatText(self, task=None):
if self.stompTask is not None:
taskMgr.remove(self.stompTask)
self.stompTask = None
self.stompChatText = ''
if self.chatTimeoutTask is not None:
taskMgr.remove(self.chatTimeoutTask)
self.chatTimeoutTask = None
self.chatPages = []
self.chatPageIndex = 0
for nametag in self.nametags:
nametag.setChatText('')
nametag.update()
if task is not None:
return Task.done
def getStompChatText(self):
return self.stompChatText
def setIcon(self, icon):
self.icon = icon
for nametag in self.nametags:
nametag.setIcon(self.icon)
def getIcon(self):
return self.icon
def setNametag2d(self, nametag2d):
if self.nametag2d is not None:
self.remove(self.nametag2d)
self.nametag2d = None
if nametag2d is None:
return
self.nametag2d = nametag2d
self.add(self.nametag2d)
def getNametag2d(self):
return self.nametag2d
def setNametag3d(self, nametag3d):
if self.nametag3d is not None:
self.remove(self.nametag3d)
self.nametag3d = None
if nametag3d is None:
return
self.nametag3d = nametag3d
self.add(self.nametag3d)
def getNametag3d(self):
return self.nametag3d
def add(self, nametag):
self.nametags.add(nametag)
nametag.setAvatar(self.avatar)
nametag.setActive(self.active)
nametag.setClickEvent(self.getUniqueName())
nametag.setChatButton(self.chatButton)
nametag.setFont(self.font)
nametag.setChatFont(self.chatFont)
nametag.setChatType(self.chatType)
nametag.setChatBalloonType(self.chatBalloonType)
nametag.setNametagColor(self.nametagColor)
nametag.setChatColor(self.chatColor)
nametag.setSpeedChatColor(self.speedChatColor)
nametag.setWordWrap(self.wordWrap)
nametag.setChatWordWrap(self.chatWordWrap)
nametag.setText(self.text)
nametag.setChatText(self.getChatText())
nametag.setIcon(self.icon)
nametag.update()
def remove(self, nametag):
nametag.destroy()
self.nametags.remove(nametag)
def updateAll(self):
for nametag in self.nametags:
nametag.update()
def manage(self, marginManager):
if self.marginManager is not None:
self.unmanage(self.marginManager)
self.marginManager = marginManager
for nametag in self.nametags:
if isinstance(nametag, MarginVisible):
nametag.manage(self.marginManager)
def unmanage(self, marginManager):
if marginManager != self.marginManager:
return
if self.marginManager is None:
return
self.marginManager = marginManager
for nametag in self.nametags:
if isinstance(nametag, MarginVisible):
nametag.unmanage(self.marginManager)
def hideNametag(self):
for nametag in self.nametags:
nametag.hideNametag()
def showNametag(self):
for nametag in self.nametags:
nametag.showNametag()
def hideChat(self):
for nametag in self.nametags:
nametag.hideChat()
def showChat(self):
for nametag in self.nametags:
nametag.showChat()
def hideThought(self):
for nametag in self.nametags:
nametag.hideThought()
def showThought(self):
for nametag in self.nametags:
nametag.showThought()
def __chatStomp(self, timeout=False):
self.setChatText(self.stompChatText, timeout=timeout)
self.stompChatText = ''
```
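For reference, a minimal standalone sketch of the chat paging and timeout math in `setChatText` above. The `\x07` page separator and the 0.5s-per-character rule come straight from the class; the `CHAT_TIMEOUT_MIN`/`CHAT_TIMEOUT_MAX` values below are illustrative placeholders, since the real constants are defined elsewhere in the class.
```python
# Standalone sketch of NametagGroup's chat paging and timeout math.
# CHAT_TIMEOUT_MIN/CHAT_TIMEOUT_MAX are illustrative placeholders here.
CHAT_TIMEOUT_MIN = 4.0
CHAT_TIMEOUT_MAX = 12.0

def split_chat_pages(chat_text):
    # Multi-page chat is encoded as one string with '\x07' page breaks.
    return chat_text.split('\x07')

def chat_timeout(page_text):
    # Display time scales with page length, clamped to [MIN, MAX].
    return min(max(len(page_text) * 0.5, CHAT_TIMEOUT_MIN), CHAT_TIMEOUT_MAX)

pages = split_chat_pages('First page\x07Second page')
print(pages)                   # ['First page', 'Second page']
print(chat_timeout(pages[0]))  # 5.0
```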
#### File: toontown/parties/KeyCodes.py
```python
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
ARROW_KEYCODE_MAP = {base.MOVE_UP: 'u',
base.MOVE_DOWN: 'd',
base.MOVE_LEFT: 'l',
base.MOVE_RIGHT: 'r'}
KEYCODE_TIMEOUT_SECONDS = 1.5
class KeyCodes(DirectObject):
notify = directNotify.newCategory('KeyCodes')
PATTERN_MATCH_EVENT = 'KeyCodes-PATTERN_MATCH'
PATTERN_NO_MATCH_EVENT = 'KeyCodes-PATTERN_NO_MATCH'
KEY_DOWN_EVENT = 'KeyCodes-KEY_DOWN_EVENT'
KEY_UP_EVENT = 'KeyCodes-KEY_UP_EVENT'
CLEAR_CODE_EVENT = 'KeyCodes-CLEAR_CODE_EVENT'
def __init__(self, keyMap = ARROW_KEYCODE_MAP, patterns = None, timeout = KEYCODE_TIMEOUT_SECONDS):
self._keyMap = keyMap
self._timeout = timeout
self._keyCode = ''
self._keyCodeCount = 0
self._keyCodeTime = 0.0
self._patterns = []
self._patternLimit = 0
self._enabled = False
self._keyDown = None
self._keysPressed = 0
self.listenForPatterns(patterns)
return
def destroy(self):
self.disable()
self.ignoreAll()
self._patterns = []
del self._patterns
del self._keyMap
del self._timeout
def listenForPatterns(self, patterns):
self._patterns = patterns if patterns is not None else []
for pattern in self._patterns:
if len(pattern) > self._patternLimit:
self._patternLimit = len(pattern)
if self._enabled:
self.disable()
self.enable()
def isAnyKeyPressed(self):
return self._keysPressed > 0
def getCurrentInputLength(self):
return self._keyCodeCount + 1
def getLargestPatternLength(self):
return self._patternLimit
def getPossibleMatchesList(self):
return [ p for p in self._patterns if p.startswith(self._keyCode) ]
def reset(self):
self._keyCode = ''
self._keyCodeCount = 0
self._keyCodeTime = 0.0
def enable(self):
if not self._enabled:
self.notify.debug('Key codes enabled')
self._enabled = True
self.__enableControls()
def disable(self):
if self._enabled:
self.notify.debug('Key codes disabled')
self.__disableControls()
self.reset()
self._enabled = False
self._keyDown = None
self._keysPressed = 0
return
def __enableControls(self):
for key in list(self._keyMap.keys()):
self.__acceptKeyDown(key)
self.__acceptKeyUp(key)
def __disableControls(self):
self.ignoreAll()
def __acceptKeyDown(self, key):
self.accept(key, self.__handleKeyDown, [key])
def __acceptKeyUp(self, key):
self.accept(key + '-up', self.__handleKeyUp, [key])
def __handleKeyDown(self, key):
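# Only the first key held down contributes to the key code; pressing a
# second key while one is already held sends a (-1, -1) "invalid" event.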
self._keysPressed += 1
if self._keyDown is None and self._keysPressed == 1:
self.__updateElapsedTime()
messenger.send(KeyCodes.KEY_DOWN_EVENT, [self._keyMap[key], self._keyCodeCount])
self._keyCode += self._keyMap[key]
self._keyCodeCount += 1
self._keyDown = key
self.__checkForPattern()
else:
messenger.send(KeyCodes.KEY_DOWN_EVENT, [-1, -1])
return
def __handleKeyUp(self, key):
arg = -1
if self._keysPressed > 0:
self._keysPressed -= 1
if self._keyDown == key and self._keysPressed == 0:
arg = self._keyMap[key]
if self._keysPressed == 0:
self._keyDown = None
messenger.send(KeyCodes.KEY_UP_EVENT, [arg])
return
def __updateElapsedTime(self):
if self._keyCodeTime != 0.0 and globalClock.getFrameTime() - self._keyCodeTime >= self._timeout:
self.notify.debug('Key code timed out. Resetting...')
self.reset()
messenger.send(KeyCodes.CLEAR_CODE_EVENT)
self._keyCodeTime = globalClock.getFrameTime()
def __checkForPattern(self):
if self._keyCode in self._patterns:
messenger.send(KeyCodes.PATTERN_MATCH_EVENT, [self._keyCode])
self.reset()
elif self._keyCodeCount == self._patternLimit or len(self.getPossibleMatchesList()) == 0:
messenger.send(KeyCodes.PATTERN_NO_MATCH_EVENT)
self.reset()
```
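A small runnable sketch of the prefix matching performed by `getPossibleMatchesList` and `__checkForPattern` above; the pattern strings here are hypothetical, not shipped game data.
```python
# Standalone sketch of KeyCodes' pattern matching over 'u'/'d'/'l'/'r'.
patterns = ['uu', 'udlr', 'rrd']  # hypothetical patterns

def possible_matches(key_code):
    # Patterns that could still be completed from the current input.
    return [p for p in patterns if p.startswith(key_code)]

key_code = ''
for key in 'udlr':  # simulated key presses
    key_code += key
    if key_code in patterns:
        print('match:', key_code)  # fires on 'udlr'
        key_code = ''
    elif not possible_matches(key_code):
        print('no match, resetting')
        key_code = ''
```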
#### File: toontown/quest/TutorialManagerAI.py
```python
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.quest.Quests import TT_TIER
from toontown.building import DistributedDoorAI, DistributedHQInteriorAI, DistributedTutorialInteriorAI, DoorTypes, FADoorCodes
from toontown.suit import DistributedTutorialSuitAI
from toontown.toon import NPCToons, Experience
class TutorialHandler:
def __init__(self, mgr, avId):
self.mgr = mgr
self.air = self.mgr.air
self.avId = avId
self.zones = (-1, -1, -1, -1)
self.event = self.air.getAvatarExitEvent(avId)
self.mgr.acceptOnce(self.event, self.abort)
self.doorEvents = []
def create(self):
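# Build a private tutorial instance: a street branch zone, Tom's shop
# interior, and an HQ interior, each in its own allocated zone, wired
# together with locked doors that open as the tutorial progresses.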
self.branchZone = self.air.zoneAllocator.allocate()
self.interiorZone = self.air.zoneAllocator.allocate()
self.hqZone = self.air.zoneAllocator.allocate()
self.streetInteriorDoor = DistributedDoorAI.DistributedDoorAI(self.air, 2, DoorTypes.EXT_STANDARD, lockValue = FADoorCodes.DEFEAT_FLUNKY_TOM)
self.interiorDoor = DistributedDoorAI.DistributedDoorAI(self.air, 2, DoorTypes.INT_STANDARD, lockValue = FADoorCodes.TALK_TO_TOM)
self.streetInteriorDoor.setOtherDoor(self.interiorDoor)
self.interiorDoor.setOtherDoor(self.streetInteriorDoor)
self.acceptDoorEvent(1, {self.interiorDoor: FADoorCodes.UNLOCKED}) # unlock the interior door once the toon has talked to Tom
self.streetInteriorDoor.generateWithRequired(self.branchZone)
self.streetInteriorDoor.sendUpdate('setDoorIndex', [self.streetInteriorDoor.getDoorIndex()])
self.interiorDoor.generateWithRequired(self.interiorZone)
self.interiorDoor.sendUpdate('setDoorIndex', [self.interiorDoor.getDoorIndex()])
self.tom = NPCToons.createNPC(self.air, 20000, NPCToons.NPCToonDict[20000], self.interiorZone)
self.tom.setTutorial(1)
self.tom.b_setParent(1)
self.tomInterior = DistributedTutorialInteriorAI.DistributedTutorialInteriorAI(2, self.air, self.interiorZone)
self.tomInterior.setTutorialNpcId(self.tom.doId)
self.tomInterior.generateWithRequired(self.interiorZone)
self.cog = DistributedTutorialSuitAI.DistributedTutorialSuitAI(self.air, self)
self.cog.setupSuitDNA(1, 0, 'c')
self.cog.generateWithRequired(self.branchZone)
self.streetNpc = None
self.hqInterior = DistributedHQInteriorAI.DistributedHQInteriorAI(1, self.air, self.hqZone)
self.hqInterior.generateWithRequired(self.hqZone)
self.hqInterior.setTutorial(1)
self.hqNpc = NPCToons.createNPC(self.air, 20002, NPCToons.NPCToonDict[20002], self.hqZone)
self.hqNpc.setTutorial(1)
self.hqNpc.setHq(1)
self.streetHqDoor = DistributedDoorAI.DistributedDoorAI(self.air, 1, DoorTypes.EXT_HQ, doorIndex = 0, lockValue = FADoorCodes.DEFEAT_FLUNKY_HQ)
self.streetHqDoor2 = DistributedDoorAI.DistributedDoorAI(self.air, 1, DoorTypes.EXT_HQ, doorIndex = 1, lockValue = FADoorCodes.DEFEAT_FLUNKY_HQ)
self.hqInsideDoor = DistributedDoorAI.DistributedDoorAI(self.air, 1, DoorTypes.INT_HQ, doorIndex = 0, lockValue = FADoorCodes.TALK_TO_HQ)
self.hqInsideDoor2 = DistributedDoorAI.DistributedDoorAI(self.air, 1, DoorTypes.INT_HQ, doorIndex = 1, lockValue = FADoorCodes.TALK_TO_HQ)
self.acceptDoorEvent(2, {
self.streetHqDoor: FADoorCodes.GO_TO_PLAYGROUND,
self.streetHqDoor2: FADoorCodes.GO_TO_PLAYGROUND,
self.hqInsideDoor2: FADoorCodes.UNLOCKED,
}
)
self.streetHqDoor.setOtherDoor(self.hqInsideDoor)
self.hqInsideDoor.setOtherDoor(self.streetHqDoor)
self.streetHqDoor2.setOtherDoor(self.hqInsideDoor2)
self.hqInsideDoor2.setOtherDoor(self.streetHqDoor2)
self.streetHqDoor.generateWithRequired(self.branchZone)
self.streetHqDoor2.generateWithRequired(self.branchZone)
self.streetHqDoor.sendUpdate('setDoorIndex', [self.streetHqDoor.getDoorIndex()])
self.streetHqDoor2.sendUpdate('setDoorIndex', [self.streetHqDoor2.getDoorIndex()])
self.hqInsideDoor.generateWithRequired(self.hqZone)
self.hqInsideDoor2.generateWithRequired(self.hqZone)
self.hqInsideDoor.sendUpdate('setDoorIndex', [self.hqInsideDoor.getDoorIndex()])
self.hqInsideDoor2.sendUpdate('setDoorIndex', [self.hqInsideDoor2.getDoorIndex()])
self.zones = (20000, self.branchZone, self.interiorZone, self.hqZone)
def deletes(self, obj):
if obj:
if obj.air is not None:
obj.requestDelete()
def destroy(self):
for ev in self.doorEvents:
self.mgr.ignore(ev)
self.mgr.ignore(self.event)
self.zones = (-1, -1, -1, -1)
return # NOTE: intentional early return; the cleanup below crashes the AI server, so these objects are leaked for now
self.deletes(self.streetInteriorDoor)
self.deletes(self.interiorDoor)
self.deletes(self.tom)
self.deletes(self.tomInterior)
self.deletes(self.cog)
self.deletes(self.streetNpc)
self.deletes(self.hqInterior)
self.deletes(self.hqNpc)
self.deletes(self.streetHqDoor)
self.deletes(self.streetHqDoor2)
self.deletes(self.hqInsideDoor)
self.deletes(self.hqInsideDoor2)
self.air.zoneAllocator.free(self.branchZone)
self.air.zoneAllocator.free(self.interiorZone)
self.air.zoneAllocator.free(self.hqZone)
def getZones(self):
return self.zones
def cogDefeated(self):
self.streetInteriorDoor.setDoorLock(FADoorCodes.TALK_TO_HQ)
self.streetHqDoor.setDoorLock(FADoorCodes.UNLOCKED)
self.streetHqDoor2.setDoorLock(FADoorCodes.UNLOCKED)
def abort(self):
self.destroy()
self.mgr.cleanup(self.avId, abort = True)
self.mgr = None
self.air = None
def finish(self):
self.destroy()
self.mgr = None
self.air = None
def acceptDoorEvent(self, index, map):
def x():
for door, code in list(map.items()):
door.setDoorLock(code)
if index == 2 and self.streetNpc is None:
self.streetNpc = NPCToons.createNPC(self.air, 20001, NPCToons.NPCToonDict[20001], self.branchZone)
self.streetNpc.setTutorial(1)
self.streetNpc.d_setPos(207, 19, -0.48)
self.streetNpc.d_setHpr(90, 0, 0)
ev = "tutorial-door-event-%d-%d" % (self.avId, index)
self.mgr.accept(ev, x)
self.doorEvents.append(ev)
class TutorialManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("TutorialManagerAI")
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
self.av2tut = {}
def requestTutorial(self):
sender = self.air.getAvatarIdFromSender()
if sender in self.av2tut:
self.notify.warning("Got second requestTutorial from avatar %d" % sender)
self.air.writeServerEvent('suspicious', sender, 'requested tutorial but already in one')
return
tut = TutorialHandler(self, sender)
self.av2tut[sender] = tut
tut.create()
self.d_enterTutorial(sender, *tut.getZones())
def rejectTutorial(self):
self.requestSkipTutorial()
def requestSkipTutorial(self):
sender = self.air.getAvatarIdFromSender()
if sender not in self.air.doId2do:
self.air.writeServerEvent('suspicious', sender, 'requested skip tutorial outside shard')
return
self.callOrWait(sender, self.__doSkip)
def callOrWait(self, sender, func):
if sender in self.air.doId2do:
func(sender)
return
self.acceptOnce('toon-generate-%d' % sender, func, [sender])
def __doSkip(self, sender):
av = self.air.doId2do.get(sender)
av.b_setRewardHistory(TT_TIER, [])
av.b_setTutorialAck(1)
av.b_setQuests([[163, 1000, 1000, 100, 3]])
self.air.writeServerEvent('skipTutorial', sender, self.air.districtId)
self.sendUpdateToChannel(sender, "skipTutorialResponse", [1])
def d_enterTutorial(self, av, *zones):
self.sendUpdateToAvatarId(av, "enterTutorial", zones)
def allDone(self):
sender = self.air.getAvatarIdFromSender()
if sender not in self.av2tut:
self.notify.warning("Got allDone from unknown avatar %d" % sender)
self.air.writeServerEvent('suspicious', sender, 'sent allDone but not doing tutorial')
return
av = self.air.doId2do[sender]
av.b_setTutorialAck(1)
self.cleanup(sender)
def toonArrived(self):
sender = self.air.getAvatarIdFromSender()
if sender not in self.av2tut:
self.notify.warning("Got toonArrived from unknown avatar %d" % sender)
self.air.writeServerEvent('suspicious', sender, 'sent toonArrived but not doing tutorial')
return
self.callOrWait(sender, self.__doEnter)
def __doEnter(self, sender):
av = self.air.doId2do[sender]
inventory = av.inventory
inventory.zeroInv(True)
inventory.addItem(4, 0)
inventory.addItem(5, 0)
av.b_setInventory(inventory.makeNetString())
av.b_setHp(15)
av.b_setMaxHp(15)
exp = Experience.Experience(av.getExperience(), av)
for i in range(8):
exp.experience[i] = 0
av.b_setExperience(exp.makeNetString())
av.b_setQuests([])
av.b_setQuestHistory([])
av.b_setRewardHistory(0, [])
def cleanup(self, avId, abort = False):
if not abort:
self.av2tut[avId].finish()
del self.av2tut[avId]
```
#### File: toontown/safezone/DistributedGameTableAI.py
```python
from direct.distributed import DistributedObjectAI
class DistributedGameTableAI(DistributedObjectAI.DistributedObjectAI):
def __init__(self, air):
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
self.posHpr = (0, 0, 0, 0, 0, 0)
# Required Fields:
def setPosHpr(self, x, y, z, h, p, r):
self.posHpr = (x, y, z, h, p, r)
def getPosHpr(self):
return self.posHpr
# Receive Fields:
def requestJoin(self, seatIndex):
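# Game tables are not implemented on the AI; every join request is
# rejected outright.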
avId = self.air.getAvatarIdFromSender()
self.sendUpdateToAvatarId(avId, 'rejectJoin', [])
def requestExit(self):
pass
```
#### File: toontown/safezone/SZTreasurePlannerAI.py
```python
from .RegenTreasurePlannerAI import RegenTreasurePlannerAI
from direct.directnotify import DirectNotifyGlobal
class SZTreasurePlannerAI(RegenTreasurePlannerAI):
notify = DirectNotifyGlobal.directNotify.newCategory('SZTreasurePlannerAI')
def __init__(self, zoneId, treasureType, healAmount, spawnPoints, spawnRate, maxTreasures):
self.zoneId = zoneId
self.spawnPoints = spawnPoints
self.healAmount = healAmount
RegenTreasurePlannerAI.__init__(self, zoneId, treasureType, 'SZTreasurePlanner-%d' % zoneId, spawnRate, maxTreasures)
def initSpawnPoints(self):
pass
def validAvatar(self, treasure, av):
# Avatars can only heal if they are missing some health, but aren't sad.
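# Note that this "validation" also applies the heal as a side effect
# before the treasure is consumed.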
if av.getHp() < av.getMaxHp() and av.getHp() > 0:
av.toonUp(self.healAmount)
return True
else:
return False
```
#### File: toontown/safezone/TTSafeZoneLoader.py
```python
from toontown.safezone import SafeZoneLoader
from toontown.safezone import TTPlayground
class TTSafeZoneLoader(SafeZoneLoader.SafeZoneLoader):
def __init__(self, hood, parentFSM, doneEvent):
SafeZoneLoader.SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)
self.playgroundClass = TTPlayground.TTPlayground
self.musicFile = 'phase_4/audio/bgm/TC_nbrhood.ogg'
self.activityMusicFile = 'phase_3.5/audio/bgm/TC_SZ_activity.ogg'
self.dnaFile = 'phase_4/dna/toontown_central_sz.dna'
self.safeZoneStorageDNAFile = 'phase_4/dna/storage_TT_sz.dna'
def load(self):
SafeZoneLoader.SafeZoneLoader.load(self)
self.birdSound = list(map(base.loader.loadSfx, ['phase_4/audio/sfx/SZ_TC_bird1.ogg',
'phase_4/audio/sfx/SZ_TC_bird2.ogg',
'phase_4/audio/sfx/SZ_TC_bird3.ogg']))
bank = self.geom.find('**/*toon_landmark_TT_bank_DNARoot')
doorTrigger = bank.find('**/door_trigger*')
doorTrigger.setY(doorTrigger.getY() - 1.5)
def unload(self):
SafeZoneLoader.SafeZoneLoader.unload(self)
del self.birdSound
```
#### File: toontown/shtiker/InventoryPage.py
```python
from . import ShtikerPage
from toontown.toonbase import ToontownBattleGlobals
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.shtiker.CogMenu import CogMenu
class InventoryPage(ShtikerPage.ShtikerPage):
def __init__(self):
ShtikerPage.ShtikerPage.__init__(self)
self.currentTrackInfo = None
self.onscreen = 0
self.lastInventoryTime = globalClock.getRealTime()
return
def load(self):
ShtikerPage.ShtikerPage.load(self)
self.title = DirectLabel(parent=self, relief=None, text=TTLocalizer.InventoryPageTitle, text_scale=0.12, textMayChange=1, pos=(0, 0, 0.62))
self.gagFrame = DirectFrame(parent=self, relief=None, pos=(0.1, 0, -0.47), scale=(0.35, 0.35, 0.35), geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor)
self.trackInfo = DirectFrame(parent=self, relief=None, pos=(-0.4, 0, -0.47), scale=(0.35, 0.35, 0.35), geom=DGG.getDefaultDialogGeom(), geom_scale=(1.4, 1, 1), geom_color=ToontownGlobals.GlobalDialogColor, text='', text_wordwrap=11, text_align=TextNode.ALeft, text_scale=0.12, text_pos=(-0.65, 0.3), text_fg=(0.05, 0.14, 0.4, 1))
self.trackProgress = DirectWaitBar(parent=self.trackInfo, pos=(0, 0, -0.2), relief=DGG.SUNKEN, frameSize=(-0.6, 0.6, -0.1, 0.1), borderWidth=(0.025, 0.025), scale=1.1, frameColor=(0.4, 0.6, 0.4, 1), barColor=(0.9, 1, 0.7, 1), text='0/0', text_scale=0.15, text_fg=(0.05, 0.14, 0.4, 1), text_align=TextNode.ACenter, text_pos=(0, -0.22))
self.trackProgress.hide()
jarGui = loader.loadModel('phase_3.5/models/gui/jar_gui')
self.moneyDisplay = DirectLabel(parent=self, relief=None, pos=(0.55, 0, -0.5), scale=0.8, text=str(base.localAvatar.getMoney()), text_scale=0.18, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0, -0.1, 0), image=jarGui.find('**/Jar'), text_font=ToontownGlobals.getSignFont())
jarGui.removeNode()
self.cogMenu = CogMenu()
self.cogMenu.reparentTo(self)
self.cogMenu.setX(-0.165)
self.cogMenu.setZ(0.63)
self.cogMenu.setScale(0.82)
self.cogMenu.hide()
def unload(self):
del self.title
self.cogMenu.cleanup()
del self.cogMenu
ShtikerPage.ShtikerPage.unload(self)
def __moneyChange(self, money):
self.moneyDisplay['text'] = str(money)
def enter(self):
ShtikerPage.ShtikerPage.enter(self)
base.localAvatar.inventory.setActivateMode('book')
base.localAvatar.inventory.show()
base.localAvatar.inventory.reparentTo(self)
self.moneyDisplay['text'] = str(base.localAvatar.getMoney())
self.accept('enterBookDelete', self.enterDeleteMode)
self.accept('exitBookDelete', self.exitDeleteMode)
self.accept('enterTrackFrame', self.updateTrackInfo)
self.accept('exitTrackFrame', self.clearTrackInfo)
self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
def exit(self):
ShtikerPage.ShtikerPage.exit(self)
self.clearTrackInfo(self.currentTrackInfo)
self.ignore('enterBookDelete')
self.ignore('exitBookDelete')
self.ignore('enterTrackFrame')
self.ignore('exitTrackFrame')
self.ignore(localAvatar.uniqueName('moneyChange'))
self.makePageWhite(None)
base.localAvatar.inventory.hide()
base.localAvatar.inventory.reparentTo(hidden)
self.exitDeleteMode()
return
def enterDeleteMode(self):
self.title['text'] = TTLocalizer.InventoryPageDeleteTitle
self.title['text_fg'] = (0, 0, 0, 1)
self.book['image_color'] = Vec4(1, 1, 1, 1)
def exitDeleteMode(self):
self.title['text'] = TTLocalizer.InventoryPageTitle
self.title['text_fg'] = (0, 0, 0, 1)
self.book['image_color'] = Vec4(1, 1, 1, 1)
def updateTrackInfo(self, trackIndex):
self.currentTrackInfo = trackIndex
trackName = TextEncoder.upper(ToontownBattleGlobals.Tracks[trackIndex])
if base.localAvatar.hasTrackAccess(trackIndex):
curExp, nextExp = base.localAvatar.inventory.getCurAndNextExpValues(trackIndex)
trackText = '%s / %s' % (curExp, nextExp)
self.trackProgress['range'] = nextExp
self.trackProgress['value'] = curExp
if curExp >= ToontownBattleGlobals.regMaxSkill:
infoText = TTLocalizer.InventoryPageTrackFull % trackName
trackText = TTLocalizer.InventoryUberTrackExp % {'nextExp': ToontownBattleGlobals.MaxSkill - curExp}
self.trackProgress['range'] = ToontownBattleGlobals.UberSkill
uberCurrExp = curExp - ToontownBattleGlobals.regMaxSkill
self.trackProgress['value'] = uberCurrExp
else:
morePoints = nextExp - curExp
if morePoints == 1:
infoText = TTLocalizer.InventoryPageSinglePoint % {'trackName': trackName,
'numPoints': morePoints}
else:
infoText = TTLocalizer.InventoryPagePluralPoints % {'trackName': trackName,
'numPoints': morePoints}
self.trackInfo['text'] = infoText
self.trackProgress['text'] = trackText
self.trackProgress['frameColor'] = (ToontownBattleGlobals.TrackColors[trackIndex][0] * 0.6,
ToontownBattleGlobals.TrackColors[trackIndex][1] * 0.6,
ToontownBattleGlobals.TrackColors[trackIndex][2] * 0.6,
1)
self.trackProgress['barColor'] = (ToontownBattleGlobals.TrackColors[trackIndex][0],
ToontownBattleGlobals.TrackColors[trackIndex][1],
ToontownBattleGlobals.TrackColors[trackIndex][2],
1)
self.trackProgress.show()
else:
infoText = TTLocalizer.InventoryPageNoAccess % trackName
self.trackInfo['text'] = infoText
self.trackProgress.hide()
def clearTrackInfo(self, trackIndex):
if self.currentTrackInfo == trackIndex:
self.trackInfo['text'] = ''
self.trackProgress.hide()
self.currentTrackInfo = None
return
def acceptOnscreenHooks(self):
self.accept(ToontownGlobals.InventoryHotkeyOn, self.showInventoryOnscreen)
self.accept(ToontownGlobals.InventoryHotkeyOff, self.hideInventoryOnscreen)
def ignoreOnscreenHooks(self):
self.ignore(ToontownGlobals.InventoryHotkeyOn)
self.ignore(ToontownGlobals.InventoryHotkeyOff)
def showInventoryOnscreen(self):
messenger.send('wakeup')
timedif = globalClock.getRealTime() - self.lastInventoryTime
if timedif < 0.7:
return
self.lastInventoryTime = globalClock.getRealTime()
if self.onscreen or base.localAvatar.questPage.onscreen:
return
self.onscreen = 1
base.localAvatar.inventory.setActivateMode('book')
base.localAvatar.inventory.show()
base.localAvatar.inventory.reparentTo(self)
self.moneyDisplay['text'] = str(base.localAvatar.getMoney())
self.accept('enterTrackFrame', self.updateTrackInfo)
self.accept('exitTrackFrame', self.clearTrackInfo)
self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
self.cogMenu.update()
self.reparentTo(aspect2d)
self.cogMenu.show()
self.title.hide()
self.show()
def hideInventoryOnscreen(self):
if not self.onscreen:
return
self.onscreen = 0
self.ignore('enterTrackFrame')
self.ignore('exitTrackFrame')
self.ignore(localAvatar.uniqueName('moneyChange'))
base.localAvatar.inventory.hide()
base.localAvatar.inventory.reparentTo(hidden)
self.reparentTo(self.book)
self.cogMenu.hide()
self.title.show()
self.hide()
```
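A standalone sketch of the gag-track progress math in `updateTrackInfo` above. The constants are placeholders standing in for the real `ToontownBattleGlobals.regMaxSkill`/`UberSkill`/`MaxSkill` values.
```python
# Illustrative constants; the real values live in ToontownBattleGlobals.
regMaxSkill = 10000
UberSkill = 500
MaxSkill = regMaxSkill + UberSkill

def progress_bar(curExp, nextExp):
    # Returns (bar range, bar value, points remaining to the next goal).
    if curExp >= regMaxSkill:
        # Past the regular cap the bar tracks uber-gag progress instead.
        return UberSkill, curExp - regMaxSkill, MaxSkill - curExp
    return nextExp, curExp, nextExp - curExp

print(progress_bar(10100, 10500))  # (500, 100, 400)
print(progress_bar(400, 500))      # (500, 400, 100)
```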
#### File: toontown/suit/DistributedLawbotBossAI.py
```python
from otp.ai.AIBaseGlobal import *
from direct.distributed.ClockDelta import *
from otp.ai.MagicWordGlobal import *
from . import DistributedBossCogAI
from direct.directnotify import DirectNotifyGlobal
from otp.avatar import DistributedAvatarAI
from . import DistributedSuitAI
from toontown.battle import BattleExperienceAI
from direct.fsm import FSM
from toontown.toonbase import ToontownGlobals
from toontown.toon import InventoryBase
from toontown.toonbase import TTLocalizer
from toontown.battle import BattleBase
from toontown.toon import NPCToons
from toontown.building import SuitBuildingGlobals
from . import SuitDNA
import random
from toontown.coghq import DistributedLawbotBossGavelAI
from toontown.suit import DistributedLawbotBossSuitAI
from toontown.coghq import DistributedLawbotCannonAI
from toontown.coghq import DistributedLawbotChairAI
from toontown.toonbase import ToontownBattleGlobals
import math
class DistributedLawbotBossAI(DistributedBossCogAI.DistributedBossCogAI, FSM.FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBossAI')
limitHitCount = 6
hitCountDamage = 35
numPies = 10
maxToonLevels = 77
def __init__(self, air):
DistributedBossCogAI.DistributedBossCogAI.__init__(self, air, 'l')
FSM.FSM.__init__(self, 'DistributedLawbotBossAI')
self.lawyers = []
self.cannons = None
self.chairs = None
self.gavels = None
self.cagedToonNpcId = random.choice(list(NPCToons.npcFriends.keys()))
self.bossMaxDamage = ToontownGlobals.LawbotBossMaxDamage
self.recoverRate = 0
self.recoverStartTime = 0
self.bossDamage = ToontownGlobals.LawbotBossInitialDamage
self.useCannons = 1
self.numToonJurorsSeated = 0
self.cannonBallsLeft = {}
self.toonLevels = 0
if 'Defeat' not in self.keyStates:
self.keyStates.append('Defeat')
self.toonupValue = 1
self.bonusState = False
self.bonusTimeStarted = 0
self.numBonusStates = 0
self.battleThreeTimeStarted = 0
self.battleThreeTimeInMin = 0
self.numAreaAttacks = 0
self.lastAreaAttackTime = 0
self.weightPerToon = {}
self.cannonIndexPerToon = {}
self.battleDifficulty = 0
return
def delete(self):
self.notify.debug('DistributedLawbotBossAI.delete')
self.__deleteBattleThreeObjects()
self.__deleteBattleTwoObjects()
taskName = self.uniqueName('clearBonus')
taskMgr.remove(taskName)
return DistributedBossCogAI.DistributedBossCogAI.delete(self)
def getHoodId(self):
return ToontownGlobals.LawbotHQ
def getCagedToonNpcId(self):
return self.cagedToonNpcId
def magicWordHit(self, damage, avId):
if self.attackCode != ToontownGlobals.BossCogDizzyNow:
self.hitBossInsides()
self.hitBoss(damage)
def hitBoss(self, bossDamage):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId in self.involvedToons, 'hitBoss from unknown avatar'):
return
self.validate(avId, bossDamage == 1, 'invalid bossDamage %s' % bossDamage)
if bossDamage < 1:
return
currState = self.getCurrentOrNextState()
if currState != 'BattleThree':
return
if bossDamage <= 12:
newWeight = self.weightPerToon.get(avId)
if newWeight:
bossDamage = newWeight
if self.bonusState and bossDamage <= 12:
bossDamage *= ToontownGlobals.LawbotBossBonusWeightMultiplier
bossDamage = min(self.getBossDamage() + bossDamage, self.bossMaxDamage)
self.b_setBossDamage(bossDamage, 0, 0)
if self.bossDamage >= self.bossMaxDamage:
self.b_setState('Victory')
else:
self.__recordHit()
def healBoss(self, bossHeal):
bossDamage = -bossHeal
avId = self.air.getAvatarIdFromSender()
currState = self.getCurrentOrNextState()
if currState != 'BattleThree':
return
bossDamage = min(self.getBossDamage() + bossDamage, self.bossMaxDamage)
bossDamage = max(bossDamage, 0)
self.b_setBossDamage(bossDamage, 0, 0)
if self.bossDamage == 0:
self.b_setState('Defeat')
else:
self.__recordHit()
def hitBossInsides(self):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId in self.involvedToons, 'hitBossInsides from unknown avatar'):
return
currState = self.getCurrentOrNextState()
if currState != 'BattleThree':
return
self.b_setAttackCode(ToontownGlobals.BossCogDizzyNow)
self.b_setBossDamage(self.getBossDamage(), 0, 0)
def hitToon(self, toonId):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId != toonId, 'hitToon on self'):
return
if avId not in self.involvedToons or toonId not in self.involvedToons:
return
toon = self.air.doId2do.get(toonId)
if toon:
self.healToon(toon, self.toonupValue)
self.sendUpdate('toonGotHealed', [toonId])
def touchCage(self):
avId = self.air.getAvatarIdFromSender()
currState = self.getCurrentOrNextState()
if currState != 'BattleThree' and currState != 'NearVictory':
return
if not self.validate(avId, avId in self.involvedToons, 'touchCage from unknown avatar'):
return
toon = simbase.air.doId2do.get(avId)
if toon:
toon.b_setNumPies(self.numPies)
toon.__touchedCage = 1
def touchWitnessStand(self):
self.touchCage()
def finalPieSplat(self):
self.notify.debug('finalPieSplat')
if self.state != 'NearVictory':
return
self.b_setState('Victory')
def doTaunt(self):
if not self.state == 'BattleThree':
return
tauntIndex = random.randrange(len(TTLocalizer.LawbotBossTaunts))
extraInfo = 0
if tauntIndex == 0 and self.involvedToons:
extraInfo = random.randrange(len(self.involvedToons))
self.sendUpdate('setTaunt', [tauntIndex, extraInfo])
def doNextAttack(self, task):
for lawyer in self.lawyers:
lawyer.doNextAttack(self)
self.waitForNextAttack(ToontownGlobals.LawbotBossLawyerCycleTime)
timeSinceLastAttack = globalClock.getFrameTime() - self.lastAreaAttackTime
allowedByTime = 15 < timeSinceLastAttack or self.lastAreaAttackTime == 0
doAttack = random.randrange(1, 101)
self.notify.debug('allowedByTime=%d doAttack=%d' % (allowedByTime, doAttack))
if doAttack <= ToontownGlobals.LawbotBossChanceToDoAreaAttack and allowedByTime:
self.__doAreaAttack()
self.numAreaAttacks += 1
self.lastAreaAttackTime = globalClock.getFrameTime()
else:
chanceToDoTaunt = ToontownGlobals.LawbotBossChanceForTaunt
action = random.randrange(1, 101)
if action <= chanceToDoTaunt:
self.doTaunt()
return
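# NOTE: everything below this return appears to be unreachable legacy
# attack-selection logic retained from the base class implementation.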
if self.attackCode == ToontownGlobals.BossCogDizzyNow:
attackCode = ToontownGlobals.BossCogRecoverDizzyAttack
else:
attackCode = random.choice([ToontownGlobals.BossCogAreaAttack,
ToontownGlobals.BossCogFrontAttack,
ToontownGlobals.BossCogDirectedAttack,
ToontownGlobals.BossCogDirectedAttack,
ToontownGlobals.BossCogDirectedAttack,
ToontownGlobals.BossCogDirectedAttack])
if attackCode == ToontownGlobals.BossCogAreaAttack:
self.__doAreaAttack()
elif attackCode == ToontownGlobals.BossCogDirectedAttack:
self.__doDirectedAttack()
else:
self.b_setAttackCode(attackCode)
def __doAreaAttack(self):
self.b_setAttackCode(ToontownGlobals.BossCogAreaAttack)
def __doDirectedAttack(self):
if self.nearToons:
toonId = random.choice(self.nearToons)
self.b_setAttackCode(ToontownGlobals.BossCogDirectedAttack, toonId)
else:
self.__doAreaAttack()
def b_setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
self.d_setBossDamage(bossDamage, recoverRate, recoverStartTime)
self.setBossDamage(bossDamage, recoverRate, recoverStartTime)
def setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
self.bossDamage = bossDamage
self.recoverRate = recoverRate
self.recoverStartTime = recoverStartTime
def getBossDamage(self):
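# The displayed damage decays over time: recoverRate is expressed in
# damage points recovered per minute since recoverStartTime.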
now = globalClock.getFrameTime()
elapsed = now - self.recoverStartTime
return int(max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0))
def d_setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
timestamp = globalClockDelta.localToNetworkTime(recoverStartTime)
self.sendUpdate('setBossDamage', [bossDamage, recoverRate, timestamp])
def waitForNextStrafe(self, delayTime):
currState = self.getCurrentOrNextState()
if currState == 'BattleThree':
taskName = self.uniqueName('NextStrafe')
taskMgr.remove(taskName)
taskMgr.doMethodLater(delayTime, self.doNextStrafe, taskName)
def stopStrafes(self):
taskName = self.uniqueName('NextStrafe')
taskMgr.remove(taskName)
def doNextStrafe(self, task):
if self.attackCode != ToontownGlobals.BossCogDizzyNow:
side = random.choice([0, 1])
direction = random.choice([0, 1])
self.sendUpdate('doStrafe', [side, direction])
delayTime = 9
self.waitForNextStrafe(delayTime)
def __sendLawyerIds(self):
lawyerIds = []
for suit in self.lawyers:
lawyerIds.append(suit.doId)
self.sendUpdate('setLawyerIds', [lawyerIds])
def d_cagedToonBattleThree(self, index, avId):
self.sendUpdate('cagedToonBattleThree', [index, avId])
def formatReward(self):
return str(self.cagedToonNpcId)
def makeBattleOneBattles(self):
self.postBattleState = 'RollToBattleTwo'
self.initializeBattles(1, ToontownGlobals.LawbotBossBattleOnePosHpr)
def generateSuits(self, battleNumber):
if battleNumber == 1:
weakenedValue = ((1, 1),
(2, 2),
(2, 2),
(1, 1),
(1, 1, 1, 1, 1))
listVersion = list(SuitBuildingGlobals.SuitBuildingInfo)
if simbase.config.GetBool('lawbot-boss-cheat', 0):
listVersion[13] = weakenedValue
SuitBuildingGlobals.SuitBuildingInfo = tuple(listVersion)
return self.invokeSuitPlanner(13, 0)
else:
return self.invokeSuitPlanner(13, 1)
def removeToon(self, avId):
toon = simbase.air.doId2do.get(avId)
if toon:
toon.b_setNumPies(0)
DistributedBossCogAI.DistributedBossCogAI.removeToon(self, avId)
def enterOff(self):
self.notify.debug('enterOff')
DistributedBossCogAI.DistributedBossCogAI.enterOff(self)
self.__deleteBattleThreeObjects()
self.__resetLawyers()
def enterElevator(self):
self.notify.debug('enterElevator')
DistributedBossCogAI.DistributedBossCogAI.enterElevator(self)
self.b_setBossDamage(ToontownGlobals.LawbotBossInitialDamage, 0, 0)
def enterIntroduction(self):
self.notify.debug('enterIntroduction')
DistributedBossCogAI.DistributedBossCogAI.enterIntroduction(self)
self.b_setBossDamage(ToontownGlobals.LawbotBossInitialDamage, 0, 0)
self.__makeChairs()
def exitIntroduction(self):
self.notify.debug('exitIntroduction')
DistributedBossCogAI.DistributedBossCogAI.exitIntroduction(self)
def enterRollToBattleTwo(self):
self.divideToons()
self.__makeCannons()
self.barrier = self.beginBarrier('RollToBattleTwo', self.involvedToons, 50, self.__doneRollToBattleTwo)
def __doneRollToBattleTwo(self, avIds):
self.b_setState('PrepareBattleTwo')
def exitRollToBattleTwo(self):
self.ignoreBarrier(self.barrier)
def enterPrepareBattleTwo(self):
self.__makeCannons()
self.barrier = self.beginBarrier('PrepareBattleTwo', self.involvedToons, 45, self.__donePrepareBattleTwo)
self.makeBattleTwoBattles()
def __donePrepareBattleTwo(self, avIds):
self.b_setState('BattleTwo')
def exitPrepareBattleTwo(self):
self.ignoreBarrier(self.barrier)
def __makeCannons(self):
if self.cannons is None:
self.cannons = []
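# Place one cannon per involved toon, spaced evenly along the segment
# from LawbotBossCannonPosA to LawbotBossCannonPosB (endpoints excluded).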
startPt = Point3(*ToontownGlobals.LawbotBossCannonPosA)
endPt = Point3(*ToontownGlobals.LawbotBossCannonPosB)
totalDisplacement = endPt - startPt
self.notify.debug('totalDisplacement=%s' % totalDisplacement)
numToons = len(self.involvedToons)
stepDisplacement = totalDisplacement / (numToons + 1)
for index in range(numToons):
newPos = stepDisplacement * (index + 1)
self.notify.debug('curDisplacement = %s' % newPos)
newPos += startPt
self.notify.debug('newPos = %s' % newPos)
cannon = DistributedLawbotCannonAI.DistributedLawbotCannonAI(self.air, self, index, newPos[0], newPos[1], newPos[2], -90, 0, 0)
cannon.generateWithRequired(self.zoneId)
self.cannons.append(cannon)
return
def __makeChairs(self):
if self.chairs is None:
self.chairs = []
for index in range(12):
chair = DistributedLawbotChairAI.DistributedLawbotChairAI(self.air, self, index)
chair.generateWithRequired(self.zoneId)
self.chairs.append(chair)
return
def __makeBattleTwoObjects(self):
self.__makeCannons()
self.__makeChairs()
def __deleteCannons(self):
if self.cannons is not None:
for cannon in self.cannons:
cannon.requestDelete()
self.cannons = None
return
def __deleteChairs(self):
if self.chairs is not None:
for chair in self.chairs:
chair.requestDelete()
self.chairs = None
return
def __stopChairs(self):
if self.chairs is not None:
for chair in self.chairs:
chair.stopCogs()
return
def __deleteBattleTwoObjects(self):
self.__deleteCannons()
self.__deleteChairs()
def getCannonBallsLeft(self, avId):
if avId in self.cannonBallsLeft:
return self.cannonBallsLeft[avId]
else:
self.notify.warning('getCannonBallsLeft invalid avId: %d' % avId)
return 0
def decrementCannonBallsLeft(self, avId):
if avId in self.cannonBallsLeft:
self.cannonBallsLeft[avId] -= 1
if self.cannonBallsLeft[avId] < 0:
self.notify.warning('decrementCannonBallsLeft <0 cannonballs for %d' % avId)
self.cannonBallsLeft[avId] = 0
else:
self.notify.warning('decrementCannonBallsLeft invalid avId: %d' % avId)
def makeBattleTwoBattles(self):
self.postBattleState = 'RollToBattleThree'
if self.useCannons:
self.__makeBattleTwoObjects()
else:
self.initializeBattles(2, ToontownGlobals.LawbotBossBattleTwoPosHpr)
def enterBattleTwo(self):
if self.useCannons:
self.cannonBallsLeft = {}
for toonId in self.involvedToons:
self.cannonBallsLeft[toonId] = ToontownGlobals.LawbotBossCannonBallMax
for chair in self.chairs:
chair.requestEmptyJuror()
self.barrier = self.beginBarrier('BattleTwo', self.involvedToons, ToontownGlobals.LawbotBossJuryBoxMoveTime + 1, self.__doneBattleTwo)
if not self.useCannons:
if self.battleA:
self.battleA.startBattle(self.toonsA, self.suitsA)
if self.battleB:
self.battleB.startBattle(self.toonsB, self.suitsB)
def __doneBattleTwo(self, avIds):
if self.useCannons:
self.b_setState('PrepareBattleThree')
else:
self.b_setState('RollToBattleThree')
def exitBattleTwo(self):
self.resetBattles()
self.numToonJurorsSeated = 0
for chair in self.chairs:
self.notify.debug('chair.state==%s' % chair.state)
if chair.state == 'ToonJuror':
self.numToonJurorsSeated += 1
self.notify.debug('numToonJurorsSeated=%d' % self.numToonJurorsSeated)
self.air.writeServerEvent('jurorsSeated', self.doId, '%s|%s|%s' % (self.dept, self.involvedToons, self.numToonJurorsSeated))
self.__deleteCannons()
self.__stopChairs()
def enterRollToBattleThree(self):
self.divideToons()
self.barrier = self.beginBarrier('RollToBattleThree', self.involvedToons, 20, self.__doneRollToBattleThree)
def __doneRollToBattleThree(self, avIds):
self.b_setState('PrepareBattleThree')
def exitRollToBattleThree(self):
self.ignoreBarrier(self.barrier)
def enterPrepareBattleThree(self):
self.calcAndSetBattleDifficulty()
self.barrier = self.beginBarrier('PrepareBattleThree', self.involvedToons, 45, self.__donePrepareBattleThree)
def __donePrepareBattleThree(self, avIds):
self.b_setState('BattleThree')
def exitPrepareBattleThree(self):
self.ignoreBarrier(self.barrier)
def enterBattleThree(self):
self.battleThreeTimeStarted = globalClock.getFrameTime()
self.calcAndSetBattleDifficulty()
self.calculateWeightPerToon()
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
self.ammoCount = diffSettings[0]
self.numGavels = diffSettings[1]
if self.numGavels >= len(ToontownGlobals.LawbotBossGavelPosHprs):
self.numGavels = len(ToontownGlobals.LawbotBossGavelPosHprs)
self.numLawyers = diffSettings[2]
if self.numLawyers >= len(ToontownGlobals.LawbotBossLawyerPosHprs):
self.numLawyers = len(ToontownGlobals.LawbotBossLawyerPosHprs)
self.toonupValue = diffSettings[3]
self.notify.debug('diffLevel=%d ammoCount=%d gavels=%d lawyers = %d, toonup=%d' % (self.battleDifficulty,
self.ammoCount,
self.numGavels,
self.numLawyers,
self.toonupValue))
self.air.writeServerEvent('lawbotBossSettings', self.doId, '%s|%s|%s|%s|%s|%s' % (self.dept,
self.battleDifficulty,
self.ammoCount,
self.numGavels,
self.numLawyers,
self.toonupValue))
self.__makeBattleThreeObjects()
self.__makeLawyers()
self.numPies = self.ammoCount
self.resetBattles()
self.setPieType()
jurorsOver = self.numToonJurorsSeated - ToontownGlobals.LawbotBossJurorsForBalancedScale
dmgAdjust = jurorsOver * ToontownGlobals.LawbotBossDamagePerJuror
self.b_setBossDamage(ToontownGlobals.LawbotBossInitialDamage + dmgAdjust, 0, 0)
if simbase.config.GetBool('lawbot-boss-cheat', 0):
self.b_setBossDamage(ToontownGlobals.LawbotBossMaxDamage - 1, 0, 0)
self.battleThreeStart = globalClock.getFrameTime()
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.__touchedCage = 0
for aGavel in self.gavels:
aGavel.turnOn()
self.waitForNextAttack(5)
self.notify.debug('battleDifficulty = %d' % self.battleDifficulty)
self.numToonsAtStart = len(self.involvedToons)
def __saySomething(self, task = None):
index = None
avId = 0
if len(self.involvedToons) == 0:
return
avId = random.choice(self.involvedToons)
toon = simbase.air.doId2do.get(avId)
if toon and toon.__touchedCage:
if self.cagedToonDialogIndex <= TTLocalizer.CagedToonBattleThreeMaxAdvice:
index = self.cagedToonDialogIndex
self.cagedToonDialogIndex += 1
elif random.random() < 0.2:
index = random.randrange(100, TTLocalizer.CagedToonBattleThreeMaxAdvice + 1)
else:
index = random.randrange(20, TTLocalizer.CagedToonBattleThreeMaxTouchCage + 1)
if index:
self.d_cagedToonBattleThree(index, avId)
self.__saySomethingLater()
return
def __saySomethingLater(self, delayTime = 15):
taskName = self.uniqueName('CagedToonSaySomething')
taskMgr.remove(taskName)
taskMgr.doMethodLater(delayTime, self.__saySomething, taskName)
def __goodJump(self, avId):
currState = self.getCurrentOrNextState()
if currState != 'BattleThree':
return
index = random.randrange(10, TTLocalizer.CagedToonBattleThreeMaxGivePies + 1)
self.d_cagedToonBattleThree(index, avId)
self.__saySomethingLater()
def __makeBattleThreeObjects(self):
if self.gavels is None:
self.gavels = []
for index in range(self.numGavels):
gavel = DistributedLawbotBossGavelAI.DistributedLawbotBossGavelAI(self.air, self, index)
gavel.generateWithRequired(self.zoneId)
self.gavels.append(gavel)
return
def __deleteBattleThreeObjects(self):
if self.gavels is not None:
for gavel in self.gavels:
gavel.request('Off')
gavel.requestDelete()
self.gavels = None
return
def doBattleThreeInfo(self):
didTheyWin = 0
if self.bossDamage == ToontownGlobals.LawbotBossMaxDamage:
didTheyWin = 1
self.battleThreeTimeInMin = globalClock.getFrameTime() - self.battleThreeTimeStarted
self.battleThreeTimeInMin /= 60.0
self.numToonsAtEnd = 0
toonHps = []
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
self.numToonsAtEnd += 1
toonHps.append(toon.hp)
self.air.writeServerEvent('b3Info', self.doId, '%d|%.2f|%d|%d|%d|%d|%d|%d|%d|%d|%d|%d|%s|%s' % (didTheyWin,
self.battleThreeTimeInMin,
self.numToonsAtStart,
self.numToonsAtEnd,
self.numToonJurorsSeated,
self.battleDifficulty,
self.ammoCount,
self.numGavels,
self.numLawyers,
self.toonupValue,
self.numBonusStates,
self.numAreaAttacks,
toonHps,
self.weightPerToon))
def exitBattleThree(self):
self.doBattleThreeInfo()
self.stopAttacks()
self.stopStrafes()
taskName = self.uniqueName('CagedToonSaySomething')
taskMgr.remove(taskName)
self.__resetLawyers()
self.__deleteBattleThreeObjects()
def enterNearVictory(self):
self.resetBattles()
def exitNearVictory(self):
pass
def enterVictory(self):
self.resetBattles()
self.suitsKilled.append({'type': None,
'level': None,
'track': self.dna.dept,
'isSkelecog': 0,
'isForeman': 0,
'isVP': 1,
'isCFO': 0,
'isSupervisor': 0,
'isVirtual': 0,
'activeToons': self.involvedToons[:]})
self.barrier = self.beginBarrier('Victory', self.involvedToons, 30, self.__doneVictory)
return
def __doneVictory(self, avIds):
self.d_setBattleExperience()
self.b_setState('Reward')
BattleExperienceAI.assignRewards(self.involvedToons, self.toonSkillPtsGained, self.suitsKilled, ToontownGlobals.dept2cogHQ(self.dept), self.helpfulToons)
preferredDept = random.randrange(len(SuitDNA.suitDepts))
typeWeights = ['single'] * 70 + ['building'] * 27 + ['invasion'] * 3
preferredSummonType = random.choice(typeWeights)
for toonId in self.involvedToons:
toon = self.air.doId2do.get(toonId)
if toon:
self.giveCogSummonReward(toon, preferredDept, preferredSummonType)
toon.b_promote(self.deptIndex)
def giveCogSummonReward(self, toon, prefDeptIndex, prefSummonType):
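# Grant a cog summon the toon does not already own: first try the
# preferred dept with each summon type, then other depts, then a few
# random picks, and finally an exhaustive search; grant nothing if
# every combination is already owned.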
cogLevel = self.toonLevels - 1
deptIndex = prefDeptIndex
summonType = prefSummonType
hasSummon = toon.hasParticularCogSummons(prefDeptIndex, cogLevel, prefSummonType)
self.notify.debug('trying to find another reward')
if not toon.hasParticularCogSummons(prefDeptIndex, cogLevel, 'single'):
summonType = 'single'
elif not toon.hasParticularCogSummons(prefDeptIndex, cogLevel, 'building'):
summonType = 'building'
elif not toon.hasParticularCogSummons(prefDeptIndex, cogLevel, 'invasion'):
summonType = 'invasion'
else:
foundOne = False
for curDeptIndex in range(len(SuitDNA.suitDepts)):
if not toon.hasParticularCogSummons(curDeptIndex, cogLevel, prefSummonType):
deptIndex = curDeptIndex
foundOne = True
break
elif not toon.hasParticularCogSummons(curDeptIndex, cogLevel, 'single'):
deptIndex = curDeptIndex
summonType = 'single'
foundOne = True
break
elif not toon.hasParticularCogSummons(curDeptIndex, cogLevel, 'building'):
deptIndex = curDeptIndex
summonType = 'building'
foundOne = True
break
elif not toon.hasParticularCogSummons(curDeptIndex, cogLevel, 'invasion'):
summonType = 'invasion'
deptIndex = curDeptIndex
foundOne = True
break
possibleCogLevel = list(range(SuitDNA.suitsPerDept))
possibleDeptIndex = list(range(len(SuitDNA.suitDepts)))
possibleSummonType = ['single', 'building', 'invasion']
typeWeights = ['single'] * 70 + ['building'] * 27 + ['invasion'] * 3
if not foundOne:
for i in range(5):
randomCogLevel = random.choice(possibleCogLevel)
randomSummonType = random.choice(typeWeights)
randomDeptIndex = random.choice(possibleDeptIndex)
if not toon.hasParticularCogSummons(randomDeptIndex, randomCogLevel, randomSummonType):
foundOne = True
cogLevel = randomCogLevel
summonType = randomSummonType
deptIndex = randomDeptIndex
break
for curType in possibleSummonType:
if foundOne:
break
for curCogLevel in possibleCogLevel:
if foundOne:
break
for curDeptIndex in possibleDeptIndex:
if foundOne:
break
if not toon.hasParticularCogSummons(curDeptIndex, curCogLevel, curType):
foundOne = True
cogLevel = curCogLevel
summonType = curType
deptIndex = curDeptIndex
if not foundOne:
cogLevel = None
summonType = None
deptIndex = None
toon.assignNewCogSummons(cogLevel, summonType, deptIndex)
return
def exitVictory(self):
self.takeAwayPies()
def enterDefeat(self):
self.resetBattles()
self.barrier = self.beginBarrier('Defeat', self.involvedToons, 10, self.__doneDefeat)
def __doneDefeat(self, avIds):
for toonId in self.involvedToons:
toon = self.air.doId2do.get(toonId)
if toon:
toon.b_setHp(0)
def exitDefeat(self):
self.takeAwayPies()
def enterFrolic(self):
DistributedBossCogAI.DistributedBossCogAI.enterFrolic(self)
self.b_setBossDamage(0, 0, 0)
def setPieType(self):
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.d_setPieType(ToontownBattleGlobals.MAX_TRACK_INDEX)
def takeAwayPies(self):
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.b_setNumPies(0)
def __recordHit(self):
now = globalClock.getFrameTime()
self.hitCount += 1
if self.hitCount < self.limitHitCount or self.bossDamage < self.hitCountDamage:
return
def __resetLawyers(self):
for suit in self.lawyers:
suit.requestDelete()
self.lawyers = []
def __makeLawyers(self):
self.__resetLawyers()
lawCogChoices = ['b',
'dt',
'ac',
'bs',
'sd',
'le',
'bw']
for i in range(self.numLawyers):
suit = DistributedLawbotBossSuitAI.DistributedLawbotBossSuitAI(self.air, None)
suit.dna = SuitDNA.SuitDNA()
lawCog = random.choice(lawCogChoices)
suit.dna.newSuit(lawCog)
suit.setPosHpr(*ToontownGlobals.LawbotBossLawyerPosHprs[i])
suit.setBoss(self)
suit.generateWithRequired(self.zoneId)
self.lawyers.append(suit)
self.__sendLawyerIds()
return
def hitChair(self, chairIndex, npcToonIndex):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId in self.involvedToons, 'hitChair from unknown avatar'):
return
if not self.chairs:
return
if chairIndex < 0 or chairIndex >= len(self.chairs):
self.notify.warning('invalid chairIndex = %d' % chairIndex)
return
if not self.state == 'BattleTwo':
return
self.chairs[chairIndex].b_setToonJurorIndex(npcToonIndex)
self.chairs[chairIndex].requestToonJuror()
def clearBonus(self, taskName):
if self and hasattr(self, 'bonusState'):
self.bonusState = False
def startBonusState(self):
self.notify.debug('startBonusState')
self.bonusTimeStarted = globalClock.getFrameTime()
self.bonusState = True
self.numBonusStates += 1
for toonId in self.involvedToons:
toon = self.air.doId2do.get(toonId)
if toon:
self.healToon(toon, ToontownGlobals.LawbotBossBonusToonup)
taskMgr.doMethodLater(ToontownGlobals.LawbotBossBonusDuration, self.clearBonus, self.uniqueName('clearBonus'))
self.sendUpdate('enteredBonusState', [])
def areAllLawyersStunned(self):
for lawyer in self.lawyers:
if not lawyer.stunned:
return False
return True
def checkForBonusState(self):
if self.bonusState:
return
if not self.areAllLawyersStunned():
return
curTime = globalClock.getFrameTime()
delta = curTime - self.bonusTimeStarted
if ToontownGlobals.LawbotBossBonusWaitTime < delta:
self.startBonusState()
def toonEnteredCannon(self, toonId, cannonIndex):
self.cannonIndexPerToon[toonId] = cannonIndex
def numJurorsSeatedByCannon(self, cannonIndex):
retVal = 0
for chair in self.chairs:
if chair.state == 'ToonJuror':
if chair.toonJurorIndex == cannonIndex:
retVal += 1
return retVal
def calculateWeightPerToon(self):
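# Each toon's pie weight is 1 plus a bonus for every juror they seated
# beyond the difficulty threshold (only when the difficulty settings
# enable the bonus).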
for toonId in self.involvedToons:
defaultWeight = 1
bonusWeight = 0
cannonIndex = self.cannonIndexPerToon.get(toonId)
if cannonIndex is not None:
diffSettings = ToontownGlobals.LawbotBossDifficultySettings[self.battleDifficulty]
if diffSettings[4]:
bonusWeight = self.numJurorsSeatedByCannon(cannonIndex) - diffSettings[5]
if bonusWeight < 0:
bonusWeight = 0
newWeight = defaultWeight + bonusWeight
self.weightPerToon[toonId] = newWeight
self.notify.debug('toon %d has weight of %d' % (toonId, newWeight))
def b_setBattleDifficulty(self, batDiff):
self.setBattleDifficulty(batDiff)
self.d_setBattleDifficulty(batDiff)
def setBattleDifficulty(self, batDiff):
self.battleDifficulty = batDiff
def d_setBattleDifficulty(self, batDiff):
self.sendUpdate('setBattleDifficulty', [batDiff])
def calcAndSetBattleDifficulty(self):
self.toonLevels = self.getToonDifficulty()
self.b_setBattleDifficulty(self.toonLevels)
@magicWord(category=CATEGORY_PROGRAMMER)
def skipCJ():
"""
Skips to the final round of the CJ.
"""
invoker = spellbook.getInvoker()
boss = None
for do in list(simbase.air.doId2do.values()):
if isinstance(do, DistributedLawbotBossAI):
if invoker.doId in do.involvedToons:
boss = do
break
if not boss:
return "You aren't in a CJ!"
if boss.state in ('PrepareBattleThree', 'BattleThree'):
return "You can't skip this round."
boss.exitIntroduction()
boss.b_setState('PrepareBattleThree')
@magicWord(category=CATEGORY_PROGRAMMER)
def killCJ():
"""
Kills the CJ.
"""
invoker = spellbook.getInvoker()
boss = None
for do in list(simbase.air.doId2do.values()):
if isinstance(do, DistributedLawbotBossAI):
if invoker.doId in do.involvedToons:
boss = do
break
if not boss:
return "You aren't in a CJ"
boss.b_setState('Victory')
return 'Killed CJ.'
```
#### File: toontown/town/BRTownLoader.py
```python
from toontown.battle import BattleParticles
from toontown.suit import Suit
from toontown.town import BRStreet
from toontown.town import TownLoader
class BRTownLoader(TownLoader.TownLoader):
def __init__(self, hood, parentFSM, doneEvent):
TownLoader.TownLoader.__init__(self, hood, parentFSM, doneEvent)
self.streetClass = BRStreet.BRStreet
self.musicFile = 'phase_8/audio/bgm/TB_SZ.ogg'
self.activityMusicFile = 'phase_8/audio/bgm/TB_SZ_activity.ogg'
self.townStorageDNAFile = 'phase_8/dna/storage_BR_town.dna'
def load(self, zoneId):
TownLoader.TownLoader.load(self, zoneId)
Suit.loadSuits(3)
dnaFile = 'phase_8/dna/the_burrrgh_' + str(self.canonicalBranchZone) + '.dna'
self.createHood(dnaFile)
self.windSound = list(map(base.loader.loadSfx, ['phase_8/audio/sfx/SZ_TB_wind_1.ogg',
'phase_8/audio/sfx/SZ_TB_wind_2.ogg',
'phase_8/audio/sfx/SZ_TB_wind_3.ogg']))
self.snow = BattleParticles.loadParticleFile('snowdisk.ptf')
self.snow.setPos(0, 0, 5)
self.snowRender = self.geom.attachNewNode('snowRender')
self.snowRender.setDepthWrite(0)
self.snowRender.setBin('fixed', 1)
def unload(self):
TownLoader.TownLoader.unload(self)
Suit.unloadSuits(3)
del self.windSound
del self.snow
del self.snowRender
def enter(self, requestStatus):
TownLoader.TownLoader.enter(self, requestStatus)
self.snow.start(camera, self.snowRender)
def exit(self):
TownLoader.TownLoader.exit(self)
self.snow.cleanup()
self.snowRender.removeNode()
``` |
{
"source": "Journeyfu/Models",
"score": 2
} |
#### File: layers/det/cascade_rcnn.py
```python
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class CascadeRCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.rpn_in_channels = cfg.fpn_out_channels
self.num_fc = cfg.num_fc
self.fc_dim = cfg.fc_dim
# cascade parameter
self.num_cascade_stages = cfg.num_cascade_stages
self.cascade_head_ious = cfg.cascade_head_ious
self.stage_loss_weights = cfg.stage_loss_weights
self.box_predictor = []
self.box2box_transform = []
self.proposal_matchers = []
self.box_head = []
self.scale_grad = layers.ScaleGradient()
for k in range(self.num_cascade_stages):
box_head_i = FastRCNNFCHEAD(cfg)
box_pred_i = FastRCNNOutputLayers(cfg)
self.box_head.append(box_head_i)
self.box_predictor.append(box_pred_i)
self.box2box_transform.append(
layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std))
if k == 0:
self.proposal_matchers.append(None)
else:
self.proposal_matchers.append(layers.Matcher([
self.cascade_head_ious[k]], [0, 1], allow_low_quality_matches=False)
)
def match_and_label_boxes(self, boxes, stage_idx, im_info, gt_targets):
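# Re-match the refined boxes from the previous stage against the ground
# truth using this stage's (stricter) IoU threshold, producing new
# classification labels and regression targets.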
# boxes: (L, 1+4) tensor of [batch_index, x1, y1, x2, y2] proposals
gt_classes_list = []
gt_boxes_list = []
for bs_i, (info_per_image, targets_per_image) in enumerate(zip(im_info, gt_targets)):
num_valid_instance = int(info_per_image[4])
valid_targets_per_image = targets_per_image[:num_valid_instance, :4]
valid_labels_per_image = targets_per_image[:num_valid_instance, 4]  # labels: 0 = background, 1..num_classes = foreground
proposals_per_image = boxes[boxes[:, 0] == bs_i][:, 1:]
match_quality_matrix = layers.get_iou(valid_targets_per_image, proposals_per_image)
# proposal_labels are 0 or 1: unmatched / matched
matched_idxs, proposal_labels = self.proposal_matchers[stage_idx](match_quality_matrix)
if len(targets_per_image) > 0:
gt_classes = valid_labels_per_image[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=0)
gt_classes[proposal_labels == 0] = 0
gt_boxes = valid_targets_per_image[matched_idxs]
else:
gt_classes = F.zeros_like(matched_idxs)
gt_boxes = F.zeros((len(proposals_per_image), 4), dtype=gt_classes.dtype)
gt_classes_list.append(gt_classes)
gt_boxes_list.append(gt_boxes)
return F.concat(gt_classes_list, axis=0).detach(), F.concat(gt_boxes_list, axis=0).detach()
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
if self.training:
losses = self.cascade_forward(fpn_fms, rcnn_rois, im_info,
(labels, bbox_targets), gt_boxes)
return losses
else:
pred_bbox, pred_scores = self.cascade_forward(fpn_fms, rcnn_rois, im_info)
return pred_bbox, pred_scores
def cascade_forward(self, fpn_fms, rcnn_rois, im_info, proposals_info=None, gt_targets=None):
head_outputs = []
image_sizes = [(int(x[0]), int(x[1])) for x in im_info]
for k in range(self.num_cascade_stages):
if k > 0:
rcnn_rois, non_empty_masks = self.create_proposals_from_boxes(
head_outputs[-1].predict_boxes_with_split(), image_sizes
)
if self.training:
proposals_info = self.match_and_label_boxes(rcnn_rois, k, im_info, gt_targets)
head_outputs.append(self.run_stage(fpn_fms, rcnn_rois, k, image_sizes, proposals_info))
if self.training:
losses = {}
for stage, output in enumerate(head_outputs):
stage_losses = output.losses()
losses.update({
k + "_stage{}".format(stage): v * self.stage_loss_weights[stage]
for k, v in stage_losses.items()
})
return losses
else:
scores_per_stage = [h.predict_probs() for h in head_outputs]
pred_scores = [
sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
for scores_per_image in zip(*scores_per_stage)
][0]
pred_bbox = head_outputs[-1].predict_boxes()
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(gt_boxes.shape[0]):
num_valid_boxes = im_info[bid, 4].astype("int32")
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = F.full((gt_boxes_per_img.shape[0], 1), bid)
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois[batch_roi_mask], gt_rois])
overlaps = layers.get_iou(all_rois[:, 1:], gt_boxes_per_img)
max_overlaps = overlaps.max(axis=1)
gt_assignment = F.argmax(overlaps, axis=1).astype("int32")
labels = gt_boxes_per_img[gt_assignment, 4]
# ---------------- get the fg/bg labels for each roi ---------------#
fg_mask = (max_overlaps >= self.cfg.fg_threshold) & (labels >= 0)
bg_mask = (
(max_overlaps >= self.cfg.bg_threshold_low)
& (max_overlaps < self.cfg.bg_threshold_high)
)
num_fg_rois = int(self.cfg.num_rois * self.cfg.fg_ratio)
fg_inds_mask = layers.sample_labels(fg_mask, num_fg_rois, True, False)
num_bg_rois = int(self.cfg.num_rois - fg_inds_mask.sum())
bg_inds_mask = layers.sample_labels(bg_mask, num_bg_rois, True, False)
labels[bg_inds_mask] = 0  # background label is 0
keep_mask = fg_inds_mask | bg_inds_mask
labels = labels[keep_mask].astype("int32")
rois = all_rois[keep_mask]
bbox_targets = gt_boxes_per_img[gt_assignment[keep_mask], :4]
# bbox_targets = self.box2box_transform[0].encode(rois[:, 1:], target_boxes)
# bbox_targets = bbox_targets.reshape(-1, 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
return (
F.concat(return_rois, axis=0).detach(),
F.concat(return_labels, axis=0).detach(),
F.concat(return_bbox_targets, axis=0).detach(),
)
def create_proposals_from_boxes(self, boxes, image_sizes):
proposal_boxes = []
non_empty_masks = []
for bs_i, (boxes_per_image, image_size) in enumerate(zip(boxes, image_sizes)):
boxes_per_image = layers.get_clipped_boxes(
boxes_per_image.detach(), image_size
)
bs_ch = F.full((len(boxes_per_image), 1), value=bs_i,
dtype="int32", device=boxes_per_image.device)
boxes_per_image = F.concat([bs_ch, boxes_per_image], axis=1)
if self.training:
# do not filter empty boxes at inference time,
# because the scores from each stage need to be aligned and added later
non_empty_mask = (boxes_per_image[:, 1:] != 0).sum(axis=1) > 0
boxes_per_image = boxes_per_image[non_empty_mask]
non_empty_masks.append(non_empty_mask)
proposal_boxes.append(boxes_per_image)
proposal_boxes = F.concat(proposal_boxes, axis=0)
if self.training:
non_empty_masks = F.concat(non_empty_masks, axis=0)
return proposal_boxes, non_empty_masks
def run_stage(self, fpn_fms, rcnn_rois, stage_idx, image_sizes, proposals_info=None):
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
) # (1024, 256, 7, 7)
pool_features = self.scale_grad(pool_features, mge.tensor(1.0 / self.num_cascade_stages))
box_features = self.box_head[stage_idx](pool_features)
pred_class_logits, pred_proposal_deltas = self.box_predictor[stage_idx](box_features)
# pred_class_logits: 1024, 6
# pred_proposal_deltas: 1024, 4 (class-agnostic regression; see FastRCNNOutputLayers)
del box_features
# the number of RoIs per image is determined from rcnn_rois
outputs = FastRCNNOutputs(
self.cfg,
self.box2box_transform[stage_idx],
pred_class_logits,
pred_proposal_deltas,
rcnn_rois,
proposals_info=proposals_info
)
return outputs
class FastRCNNOutputs():
def __init__(self, cfg, bbox_transform, pred_class_logits, pred_proposal_deltas, rcnn_rois,
proposals_info=None):
self.cfg = cfg
self.rcnn_rois = rcnn_rois
self.pred_class_logits = pred_class_logits  # (1024, num_classes + 1)
self.pred_proposal_deltas = pred_proposal_deltas  # (1024, 4)
self.bbox_transform = bbox_transform
bs = int(rcnn_rois[:, 0].max())
self.nums_instance_per_image = [(rcnn_rois[:, 0] == i).sum() for i in range(bs)]
if proposals_info is not None:
self.gt_labels = proposals_info[0]
self.gt_targets = proposals_info[1]
def losses(self):
return {
"loss_cls": self.softmax_cross_entropy_loss(),
"loss_box_reg": self.smooth_l1_loss(),
}
def smooth_l1_loss(self):
# loss for rcnn regression
# the deltas may be zero, because gt boxes were appended to the rois
bbox_deltas = self.bbox_transform.encode(self.rcnn_rois[:, 1:], self.gt_targets).reshape(-1, 4)
pred_offsets = self.pred_proposal_deltas.reshape(-1, 4)
num_samples = self.gt_labels.shape[0]
fg_mask = self.gt_labels > 0
if fg_mask.sum() == 0:
loss_rcnn_bbox = pred_offsets.sum() * 0.
else:
loss_rcnn_bbox = layers.smooth_l1_loss(
pred_offsets[fg_mask],
bbox_deltas[fg_mask],
self.cfg.rcnn_smooth_l1_beta,
).sum() / F.maximum(num_samples, 1)
return loss_rcnn_bbox
def softmax_cross_entropy_loss(self):
return F.loss.cross_entropy(self.pred_class_logits, self.gt_labels, axis=1)
def predict_boxes(self):
pred_offsets = self.pred_proposal_deltas.reshape(-1, 4)
return self.bbox_transform.decode(self.rcnn_rois[:, 1:5], pred_offsets)
def predict_probs(self):
pred_scores = F.softmax(self.pred_class_logits, axis=1)[:, 1:]
return F.split(pred_scores, self.nums_instance_per_image, axis=0)
def predict_boxes_with_split(self):
# the list passed to megengine's F.split has length = number of segments - 1
pred_box_delta = self.predict_boxes().detach()
return F.split(pred_box_delta, self.nums_instance_per_image, axis=0)
class FastRCNNFCHEAD(M.Module):
def __init__(self, cfg):
super().__init__()
self.rpn_in_channels = cfg.fpn_out_channels
self.pooling_size = cfg.pooling_size
self.num_fc = cfg.num_fc
self.fc_dim = cfg.fc_dim
in_ch = self.rpn_in_channels * self.pooling_size[0] * self.pooling_size[1]
self.fc_list = list()
for i in range(self.num_fc):
self.fc_list.append(M.Linear(in_ch, self.fc_dim))
in_ch = self.fc_dim
for l in self.fc_list:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, x):
if len(self.fc_list):
for i, layer in enumerate(self.fc_list):
if i == 0:
x = F.flatten(x, start_axis=1)
x = F.relu(layer(x))
return x
class FastRCNNOutputLayers(M.Module):
def __init__(self, cfg):
super().__init__()
# box predictor
self.pred_cls = M.Linear(cfg.fc_dim, cfg.num_classes+1)
self.pred_delta = M.Linear(cfg.fc_dim, 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, x):
pred_logits = self.pred_cls(x)
pred_offsets = self.pred_delta(x)
return pred_logits, pred_offsets
``` |
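Two bookkeeping details above are easy to misread: how the per-image RoI counts relate to the list handed to `F.split` (whose length, per the source comment, is the number of segments minus one), and how per-stage losses are weighted and merged in `cascade_forward`. A numpy-only sketch with made-up numbers (numpy stands in for MegEngine here; `numpy.split` takes split positions):
```python
import numpy as np

# Column 0 of rcnn_rois tags each RoI with its image index (made-up example).
roi_batch_ids = np.array([0, 0, 0, 1, 1, 2])
num_images = int(roi_batch_ids.max()) + 1

# numpy.split takes split *positions*: a list of length num_segments - 1,
# the same length convention the F.split comment above describes.
counts = [int((roi_batch_ids == i).sum()) for i in range(num_images)]
sections = np.cumsum(counts)[:-1]                 # [3, 5]
per_image = np.split(np.arange(len(roi_batch_ids)), sections)
print([p.tolist() for p in per_image])            # [[0, 1, 2], [3, 4], [5]]

# Per-stage losses are scaled by stage_loss_weights and merged under
# stage-suffixed keys, mirroring cascade_forward above (values made up).
stage_loss_weights = [1.0, 0.5, 0.25]
stage_losses = [{"loss_cls": 0.8, "loss_box_reg": 0.4} for _ in range(3)]
losses = {
    f"{key}_stage{s}": val * w
    for s, (sl, w) in enumerate(zip(stage_losses, stage_loss_weights))
    for key, val in sl.items()
}
print(losses["loss_cls_stage2"])                  # 0.2
```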
{
"source": "JourneyG/pymapdl",
"score": 3
} |
#### File: _commands/database/selecting.py
```python
from typing import Optional, Union
from ansys.mapdl.core.mapdl_types import MapdlInt, MapdlFloat
class Selecting:
def allsel(self, labt="", entity="", **kwargs):
"""Selects all entities with a single command.
APDL Command: ALLSEL
Parameters
----------
labt
Type of selection to be made:
ALL - Selects all items of the specified entity type and all
items of lower entity types (default).
BELOW - Selects all items directly associated with and below
the selected items of the specified entity type.
entity
Entity type on which selection is based:
ALL - All entity types (default).
VOLU - Volumes.
AREA - Areas.
LINE - Lines.
KP - Keypoints.
ELEM - Elements.
NODE - Nodes.
Notes
-----
ALLSEL is a convenience command that allows the user to select all
items of a specified entity type or to select items associated with the
selected items of a higher entity.
An entity hierarchy is used to decide what entities will be available
in the selection process. This hierarchy from top to bottom is as
follows: volumes, areas, lines, keypoints, elements, and nodes. The
hierarchy may also be divided into two branches: the solid model and
the finite element model. The label ALL selects items based on one
branch only, while BELOW uses the entire entity hierarchy. For
example, ALLSEL,ALL,VOLU selects all volumes, areas, lines, and
keypoints in the data base. ALLSEL,BELOW,AREA selects all lines
belonging to the selected areas; all keypoints belonging to those
lines; all elements belonging to those areas, lines, and keypoints; and
all nodes belonging to those elements.
The $ character should not be used after the ALLSEL command.
This command is valid in any processor.
Examples
--------
>>> mapdl.allsel()
"""
command = "ALLSEL,%s,%s" % (str(labt), str(entity))
return self.run(command, **kwargs)
def asll(self, type_="", arkey="", **kwargs):
"""Selects those areas containing the selected lines.
APDL Command: ASLL
Parameters
----------
type\_
Label identifying the type of area select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
arkey
Specifies whether all contained area lines must be selected [LSEL]:
0 - Select area if any of its lines are in the selected line set.
1 - Select area only if all of its lines are in the selected line set.
Notes
-----
This command is valid in any processor.
"""
command = "ASLL,%s,%s" % (str(type_), str(arkey))
return self.run(command, **kwargs)
def asel(self, type_="", item="", comp="", vmin="", vmax="", vinc="",
kswp="", **kwargs):
"""Selects a subset of areas.
APDL Command: ASEL
Parameters
----------
type\_
Label identifying the type of select:
S - Select a new set (default)
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
ALL - Restore the full set.
NONE - Unselect the full set.
INVE - Invert the current set (selected becomes unselected and vice versa).
STAT - Display the current select status.
Notes
-----
Selects a subset of areas. For example, to select those areas with area
numbers 1 through 7, use
>>> mapdl.asel('S','AREA', '', 1, 7)
The selected subset is then used when the ALL label is entered
(or implied) on other commands, such as
>>> mapdl.alist('ALL')
Only data identified by area number are selected. Data
are flagged as selected and unselected; no data are actually deleted
from the database.
In a cyclic symmetry analysis, area hot spots can be modified.
Consequently, the result of an area selection may be different before
and after the CYCLIC command.
If Item = ACCA, the command selects only those areas that were created
by concatenation. The KSWP field is processed, but the Comp, VMIN,
VMAX, and VINC fields are ignored.
This command is valid in any processor.
For Selects based on non-integer numbers (coordinates, results, etc.),
items that are within the range VMIN-Toler and VMAX+Toler are selected.
The default tolerance Toler is based on the relative values of VMIN and
VMAX as follows:
If VMIN = VMAX, Toler = 0.005 x VMIN.
If VMIN = VMAX = 0.0, Toler = 1.0E-6.
If VMAX ≠ VMIN, Toler = 1.0E-8 x (VMAX-VMIN).
Use the SELTOL command to override this default and specify Toler
explicitly.
Table 127: ASEL - Valid Item and Component Labels
Examples
--------
Select area(s) at location x == 0. Note that value of seltol is
used since ``vmin == vmax``.
>>> mapdl.asel('S', 'LOC', 'X', 0)
Select areas between ``y == 2`` and ``y == 4``
>>> mapdl.asel('S', 'LOC', 'Y', 2, 4)
"""
command = f"ASEL,{type_},{item},{comp},{vmin},{vmax},{vinc},{kswp}"
return self.run(command, **kwargs)
def aslv(self, type_="", **kwargs):
"""Selects those areas contained in the selected volumes.
APDL Command: ASLV
Parameters
----------
type\_
Label identifying the type of area select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
Notes
-----
This command is valid in any processor.
"""
return self.run(f"ASLV,{type_}", **kwargs)
def dofsel(self, type_="", dof1="", dof2="", dof3="", dof4="", dof5="",
dof6="", **kwargs):
"""Selects a DOF label set for reference by other commands.
APDL Command: DOFSEL
Parameters
----------
type\_
Label identifying the type of select:
S - Select a new set of labels.
A - Add labels to the current set.
U - Unselect (remove) labels from the current set.
ALL - Restore the full set of labels.
STAT - Display the current select status.
dof1, dof2, dof3, . . . , dof6
Used only with Type = S, A, or U. Valid structural labels: UX,
UY, or UZ (displacements); U (UX, UY, and UZ); ROTX, ROTY, or ROTZ
(rotations); ROT (ROTX, ROTY, and ROTZ); DISP (U and ROT); HDSP
(Hydrostatic pressure). Valid thermal labels: TEMP, TBOT, TE2, TE3,
. . ., TTOP (temperature). Valid acoustic labels: PRES
(pressure); UX, UY, or UZ (displacements for FSI coupled elements).
Valid electric labels: VOLT (voltage); EMF (electromotive force
drop); CURR (current). Valid magnetic labels: MAG (scalar
magnetic potential); AX, AY or AZ (vector magnetic potentials); A
(AX, AY and AZ); CURR (current). Valid structural force labels:
FX, FY, or FZ (forces); F (FX, FY, and FZ); MX, MY, or MZ
(moments); M (MX, MY, and MZ); FORC (F and M); DVOL (fluid mass
flow rate). Valid thermal force labels: HEAT, HBOT, HE2, HE3, . .
., HTOP (heat flow). Valid fluid flow force labels: FLOW (fluid
flow). Valid electric force labels: AMPS (current flow); CHRG
(electric charge). Valid magnetic force labels: FLUX (scalar
magnetic flux); CSGX, CSGY, or CSGZ (magnetic current segments);
CSG (CSGX, CSGY, and CSGZ). Valid diffusion labels: CONC
(concentration); RATE (diffusion flow rate).
Notes
-----
Selects a degree of freedom label set for reference by other commands.
The label set is used on certain commands where ALL is either input in
the degree of freedom label field or implied. The active label set has
no effect on the solution degrees of freedom. Specified labels which
are not active in the model (from the ET or DOF command) are ignored.
As a convenience, a set of force labels corresponding to the degree of
freedom labels is also selected. For example, selecting UX also causes
FX to be selected (and vice versa). The force label set is used on
certain commands where ALL is input in the force label field.
This command is valid in any processor.
"""
command = "DOFSEL,%s,%s,%s,%s,%s,%s,%s" % (str(type_), str(
dof1), str(dof2), str(dof3), str(dof4), str(dof5), str(dof6))
return self.run(command, **kwargs)
def esel(self, type_: str = "", item: str = "", comp: str = "",
vmin: Union[str, int, float] = "",
vmax: Union[str, int, float] = "", vinc: MapdlInt = "",
kabs: MapdlInt = "", **kwargs) -> Optional[str]:
"""Selects a subset of elements.
APDL Command: ESEL
Parameters
----------
type\_
Label identifying the type of select:
- S - Select a new set (default).
- R - Reselect a set from the current set.
- A - Additionally select a set and extend the current set.
- U - Unselect a set from the current set.
- ALL - Restore the full set.
- NONE - Unselect the full set.
- INVE - Invert the current set (selected becomes
unselected and vice versa).
- STAT - Display the current select status.
item
Label identifying data, see Table 110: ESEL - Valid Item
and Component Labels. Some items also require a
component label. Defaults to ELEM. If Item = STRA
(straightened), elements are selected whose midside nodes
do not conform to the curved line or non-flat area on
which they should lie. (Such elements are sometimes
formed during volume meshing (VMESH) in an attempt to
avoid excessive element distortion.) You should
graphically examine any such elements to evaluate their
possible effect on solution accuracy.
comp
Component of the item (if required). Valid component
labels are shown in Table 110: ESEL - Valid Item and
Component Labels below.
vmin
Minimum value of item range. Ranges are element numbers,
attribute numbers, load values, or result values
as appropriate for the item. A component name (as
specified via the CM command) can also be substituted for
VMIN (in which case VMAX and VINC are ignored).
vmax
Maximum value of item range. VMAX defaults to VMIN for
input values. For result values, VMAX defaults to infinity if VMIN is
positive, or to zero if VMIN is negative.
vinc
Value increment within range. Used only with integer
ranges (such as for element and attribute numbers).
Defaults to 1. VINC cannot be negative.
kabs
Absolute value key:
- `kabs = 0` - Check sign of value during selection.
- `kabs = 1` - Use absolute value during selection (sign
ignored).
Notes
-----
The fields `item`, `comp`, `vmin`, `vmax`, `vinc` and `kabs` are
used only with `type_` = `"S"`, `"R"`, `"A"`, or `"U"`.
Selects elements based on values of a labeled item and
component. For example, to select a new set of elements
based on element numbers 1
through 7, use
>>> mapdl.esel('S', 'ELEM', '', 1, 7)
The subset is used when the ALL label is entered (or implied)
on other commands, such as
>>> mapdl.elist('ALL')
Only data identified by element number are
selected. Selected data are internally flagged; no actual
removal of data from the database occurs. Different element
subsets cannot be used for different load steps [SOLVE] in a
/SOLU sequence. The subset used in the first load step
will be used for all subsequent load steps regardless of
subsequent ESEL specifications.
This command is valid in any processor.
Elements crossing the named path (see PATH command) will be
selected. This option is only available in PREP7 and POST1.
If no geometry data has been mapped to the path (i.e.,
via PMAP and PDEF commands), the path will assume the default
mapping option (PMAP,UNIFORM) to map the geometry prior to
selecting the elements. If an invalid path name is
given, the ESEL command is ignored (status of selected
elements is unchanged). If there are no elements crossing the
path, the ESEL command will return zero elements selected.
For selections based on non-integer numbers (coordinates,
results, etc.), items that are within the range VMIN -Toler
and VMAX + Toler are selected. The default tolerance Toler is
based on the relative values of VMIN and VMAX as follows:
If VMIN = VMAX, Toler = 0.005 x VMIN.
If VMIN = VMAX = 0.0, Toler = 1.0E-6.
If VMAX ≠ VMIN, Toler = 1.0E-8 x (VMAX - VMIN).
Use the SELTOL command to override this default and specify
Toler explicitly.
Table 133: ESEL - Valid Item and Component Labels
Examples
--------
Select elements using material numbers 2, 4 and 6.
>>> mapdl.esel('S', 'MAT', '', 2, 6, 2)
Of these, reselect SOLID185 element types
>>> mapdl.esel('R', 'ENAME', '', 185)
Select elements assigned to material property 2
>>> mapdl.esel('S', 'MAT', '', 2)
Note, this command is equivalent to:
>>> mapdl.esel('S', 'MAT', vmin=2)
"""
command = f"ESEL,{type_},{item},{comp},{vmin},{vmax},{vinc},{kabs}"
return self.run(command, **kwargs)
def esla(self, type_: str = "", **kwargs) -> Optional[str]:
"""Selects those elements associated with the selected areas.
APDL Command: ESLA
Parameters
----------
type_
Label identifying the type of element select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
Notes
-----
Selects area elements belonging to meshed [AMESH], selected
[ASEL] areas.
This command is valid in any processor.
"""
command = f"ESLA,{type_}"
return self.run(command, **kwargs)
def esll(self, type_: str = "", **kwargs) -> Optional[str]:
"""Selects those elements associated with the selected lines.
APDL Command: ESLL
Parameters
----------
type_
Label identifying the type of element select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
Notes
-----
Selects line elements belonging to meshed [LMESH], selected
[LSEL] lines.
This command is valid in any processor.
"""
command = f"ESLL,{type_}"
return self.run(command, **kwargs)
def esln(self, type_: str = "", ekey: MapdlInt = "",
nodetype: str = "", **kwargs) -> Optional[str]:
"""Selects those elements attached to the selected nodes.
APDL Command: ESLN
Parameters
----------
type_
Label identifying the type of element selected:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
ekey
Node set key:
0 - Select element if any of its nodes are in the
selected nodal set (default).
1 - Select element only if all of its nodes are in the
selected nodal set.
nodetype
Label identifying type of nodes to consider when selecting:
ALL - Select elements considering all of their nodes (
default).
ACTIVE - Select elements considering only their active
nodes. An active node is a node
that contributes DOFs to the model.
INACTIVE - Select elements considering only their
inactive nodes (such as orientation or
radiation nodes).
CORNER - Select elements considering only their corner
nodes.
MID - Select elements considering only their midside nodes.
Notes
-----
ESLN selects elements which have any (or all EKEY) NodeType
nodes in the currently-selected set of nodes. Only elements
having nodes in the currently-selected set can be selected.
This command is valid in any processor.
"""
command = f"ESLN,{type_},{ekey},{nodetype}"
return self.run(command, **kwargs)
def eslv(self, type_: str = "", **kwargs) -> Optional[str]:
"""Selects elements associated with the selected volumes.
APDL Command: ESLV
Parameters
----------
type_
Label identifying the type of element selected:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
Notes
-----
Selects volume elements belonging to meshed [VMESH], selected
[VSEL]
volumes.
This command is valid in any processor.
"""
command = f"ESLV,{type_}"
return self.run(command, **kwargs)
def ksel(self, type_="", item="", comp="", vmin="", vmax="", vinc="",
kabs="", **kwargs):
"""Selects a subset of keypoints or hard points.
APDL Command: KSEL
Parameters
----------
type\_
Label identifying the type of select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
ALL - Restore the full set.
NONE - Unselect the full set.
INVE - Invert the current set (selected becomes unselected and vice versa).
STAT - Display the current select status.
Notes
-----
Selects a subset of keypoints or hard points. For example, to select a
new set of keypoints based on keypoint numbers 1 through 7, use
>>> mapdl.ksel('S', 'KP', '', 1, 7)
The selected subset is used when the ALL label is
entered (or implied) on other commands, such as KLIST,ALL. Only data
identified by keypoint number are selected. Data are flagged as
selected and unselected; no data are actually deleted from the
database.
This command is valid in any processor.
For selections based on non-integer numbers (coordinates, results,
etc.), items that are within the range VMIN -Toler and VMAX + Toler are
selected. The default tolerance Toler is based on the relative values
of VMIN and VMAX as follows:
If VMIN = VMAX, Toler = 0.005 x VMIN.
If VMIN = VMAX = 0.0, Toler = 1.0E-6.
If VMAX ≠ VMIN, Toler = 1.0E-8 x (VMAX - VMIN).
Use the SELTOL command to override this default and specify Toler
explicitly.
Table 203: KSEL - Valid Item and Component Labels
Examples
--------
To select a single keypoint (keypoint 1)
>>> mapdl.ksel('S', 'KP', '', 1)
"""
command = "KSEL,%s,%s,%s,%s,%s,%s,%s" % (str(type_), str(
item), str(comp), str(vmin), str(vmax), str(vinc), str(kabs))
return self.run(command, **kwargs)
def ksll(self, type_="", **kwargs):
"""Selects those keypoints contained in the selected lines.
APDL Command: KSLL
Parameters
----------
type\_
Label identifying the type of keypoint select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
Notes
-----
This command is valid in any processor.
"""
command = "KSLL,%s" % (str(type_))
return self.run(command, **kwargs)
def ksln(self, type_="", **kwargs):
"""Selects those keypoints associated with the selected nodes.
APDL Command: KSLN
Parameters
----------
type\_
Label identifying the type of keypoint select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
Notes
-----
Valid only if the nodes were generated by a meshing operation [KMESH,
LMESH, AMESH, VMESH] on a solid model that contains the associated
keypoints.
This command is valid in any processor.
"""
command = "KSLN,%s" % (str(type_))
return self.run(command, **kwargs)
def lsel(self, type_="", item="", comp="", vmin="", vmax="", vinc="",
kswp="", **kwargs):
"""Selects a subset of lines.
APDL Command: LSEL
Parameters
----------
type\_
Label identifying the type of select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
ALL - Restore the full set.
NONE - Unselect the full set.
INVE - Invert the current set (selected becomes unselected and vice versa).
STAT - Display the current select status.
Notes
-----
Selects lines based on values of a labeled item and component. For
example, to select a new set of lines based on line numbers 1 through
7, use LSEL,S,LINE,,1,7. The subset is used when the ALL label is
entered (or implied) on other commands, such as LLIST,ALL. Only data
identified by line number are selected. Data are flagged as selected
and unselected; no data are actually deleted from the database.
If Item = LCCA, the command selects only those lines that were created
by concatenation. The KSWP field is processed, but the Comp, VMIN,
VMAX, and VINC fields are ignored.
If Item = HPT, the command selects only those lines that contain hard
points.
Item = RADIUS is only valid for lines that are circular arcs.
LSEL is valid in any processor.
For selections based on non-integer numbers (coordinates, results,
etc.), items that are within the range VMIN -Toler and VMAX +Toler are
selected. The default tolerance Toler is based on the relative values
of VMIN and VMAX as follows:
If VMIN = VMAX, Toler = 0.005 x VMIN.
If VMIN = VMAX = 0.0, Toler = 1.0E-6.
If VMAX ≠ VMIN, Toler = 1.0E-8 x (VMAX - VMIN).
Use the SELTOL command to override this default and specify Toler
explicitly.
Table 204: LSEL - Valid Item and Component Labels
"""
command = "LSEL,%s,%s,%s,%s,%s,%s,%s" % (str(type_), str(
item), str(comp), str(vmin), str(vmax), str(vinc), str(kswp))
return self.run(command, **kwargs)
def lsla(self, type_="", **kwargs):
"""Selects those lines contained in the selected areas.
APDL Command: LSLA
Parameters
----------
type\_
Label identifying the type of line select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
Notes
-----
This command is valid in any processor.
"""
command = "LSLA,%s" % (str(type_))
return self.run(command, **kwargs)
def lslk(self, type_="", lskey="", **kwargs):
"""Selects those lines containing the selected keypoints.
APDL Command: LSLK
Parameters
----------
type\_
Label identifying the type of line select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
lskey
Specifies whether all contained line keypoints must be selected
[KSEL]:
0 - Select line if any of its keypoints are in the selected keypoint set.
1 - Select line only if all of its keypoints are in the selected keypoint set.
Notes
-----
This command is valid in any processor.
"""
command = "LSLK,%s,%s" % (str(type_), str(lskey))
return self.run(command, **kwargs)
def nsel(self, type_="", item="", comp="", vmin="", vmax="", vinc="",
kabs="", **kwargs):
"""Selects a subset of nodes.
APDL Command: NSEL
Parameters
----------
type\_
Label identifying the type of select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
ALL - Restore the full set.
NONE - Unselect the full set.
INVE - Invert the current set (selected becomes unselected and vice versa).
STAT - Display the current select status.
Notes
-----
Selects a subset of nodes. For example, to select a new set of nodes
based on node numbers 1 through 7, do
>>> mapdl.nsel('S','NODE','',1,7)
The subset is used when the `'ALL'` label is entered (or implied)
on other commands, such as
>>> mapdl.nlist('ALL')
Only data identified by node number are selected. Data
are flagged as selected and unselected; no data are actually deleted
from the database.
When selecting nodes by results, the full graphics value is used,
regardless of whether PowerGraphics is on.
Solution result data consists of two types: 1) nodal degree of freedom
results, initially calculated at the nodes (such as displacement,
temperature, pressure, etc.), and 2) element results, initially
calculated elsewhere (such as at an element integration point or
thickness location) and then recalculated at the nodes (such as
stresses, strains, etc.). Various element results also depend upon the
recalculation method and the selected results location [AVPRIN, RSYS,
FORCE, LAYER and SHELL].
You must have all the nodes (corner and midside nodes) on the external
face of the element selected to use Item = EXT.
This command is valid in any processor.
For Selects based on non-integer numbers (coordinates, results, etc.),
items that are within the range VMIN-Toler and VMAX+Toler are selected.
The default tolerance Toler is based on the relative values of VMIN and
VMAX as follows:
If VMIN = VMAX, Toler = 0.005 x VMIN.
If VMIN = VMAX = 0.0, Toler = 1.0E-6.
If VMAX ≠ VMIN, Toler = 1.0E-8 x (VMAX-VMIN).
Use the SELTOL command to override this default and specify Toler
explicitly.
Table 208: NSEL - Valid Item and Component Labels
Table 209: NSEL - Valid Item and Component Labels for Nodal DOF
Result Values
Examples
--------
Select nodes at `x == 0`, Of these, reselect nodes between ``1 < Y
< 10``, then finally unselect nodes between ``5 < Y < 6``.
>>> mapdl.nsel('S', 'LOC', 'X', 0)
>>> mapdl.nsel('R', 'LOC', 'Y', 1, 10)
>>> mapdl.nsel('U', 'LOC', 'Y', 5, 6)
For other coordinate systems activate the coord system first.
First, change to cylindrical coordinate system, select nodes at
``radius == 5``. Reselect nodes from 0 to 90 degrees.
>>> mapdl.csys(1)
>>> mapdl.nsel('S', 'LOC', 'X', 5)
>>> mapdl.nsel('R', 'LOC', 'Y', 0, 90)
Note that the labels X, Y, and Z are always used, regardless of which
coordinate system is activated. They take on different meanings in
different systems. Additionally, angles are always in degrees, not
radians.
Select elements assigned to material property 2
>>> mapdl.esel('S', 'MAT', '', 2)
Select the nodes these elements use
>>> mapdl.nsle()
Reselect nodes on the element external surfaces
>>> mapdl.nsel('R', 'EXT')
Change to cylindrical coordinates
>>> mapdl.csys(1)
Reselect nodes with radius=5
>>> mapdl.nsel('R', 'LOC', 'X', 5)
"""
command = "NSEL,%s,%s,%s,%s,%s,%s,%s" % (str(type_), str(
item), str(comp), str(vmin), str(vmax), str(vinc), str(kabs))
return self.run(command, **kwargs)
def nsla(self, type_="", nkey="", **kwargs):
"""Selects those nodes associated with the selected areas.
APDL Command: NSLA
Parameters
----------
type\_
Label identifying the type of node select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
nkey
Specifies whether only interior area nodes are to be selected:
0 - Select only nodes interior to selected areas.
1 - Select all nodes (interior to area, interior to lines, and at keypoints)
associated with the selected areas.
Notes
-----
Valid only if the nodes were generated by an area meshing operation
[AMESH, VMESH] on a solid model that contains the selected areas.
This command is valid in any processor.
Examples
--------
Select area(s) at location x=0. Note that the selection tolerance
above and below 0 will depend on the value of ``mapdl.seltol``
>>> mapdl.asel('S', 'LOC', 'X', 0)
Select nodes residing on those areas.
>>> mapdl.nsla('S', 1)
"""
command = "NSLA,%s,%s" % (str(type_), str(nkey))
return self.run(command, **kwargs)
def nsle(self, type_="", nodetype="", num="", **kwargs):
"""Selects those nodes attached to the selected elements.
APDL Command: NSLE
Parameters
----------
type\_
Label identifying the type of node select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
nodetype
Label identifying type of nodes to consider when selecting:
ALL - Select all nodes of the selected elements (default).
ACTIVE - Select only the active nodes. An active node is a node that contributes DOFs to
the model.
INACTIVE - Select only inactive nodes (such as orientation or radiation).
CORNER - Select only corner nodes.
MID - Select only midside nodes.
POS - Select nodes in position Num.
FACE - Select nodes on face Num.
num
Position or face number for NodeType = POS or FACE.
Notes
-----
NSLE selects NodeType nodes attached to the currently-selected set of
elements. Only nodes on elements in the currently-selected element set
can be selected.
.. note::
When using degenerate hexahedral elements, NSLE,U,CORNER and
NSLE,S,MID will not select the same set of nodes because some
nodes appear as both corner and midside nodes.
This command is valid in any processor.
Examples
--------
Select elements assigned to material property 2 and then select
the nodes these elements use.
>>> mapdl.esel('S', 'MAT', '', 2)
>>> mapdl.nsle()
"""
command = "NSLE,%s,%s,%s" % (str(type_), str(nodetype), str(num))
return self.run(command, **kwargs)
def nslk(self, type_="", **kwargs):
"""Selects those nodes associated with the selected keypoints.
APDL Command: NSLK
Parameters
----------
type\_
Label identifying the type of node select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
Notes
-----
Valid only if the nodes were generated by a keypoint meshing operation
[KMESH, LMESH, AMESH, VMESH] on a solid model that contains the
selected keypoints.
This command is valid in any processor.
"""
command = "NSLK,%s" % (str(type_))
return self.run(command, **kwargs)
def nsll(self, type_="", nkey="", **kwargs):
"""Selects those nodes associated with the selected lines.
APDL Command: NSLL
Parameters
----------
type\_
Label identifying the type of node select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
nkey
Specifies whether only interior line nodes are to be selected:
0 - Select only nodes interior to selected lines.
1 - Select all nodes (interior to line and at keypoints)
associated with the selected lines.
Notes
-----
Valid only if the nodes were generated by a line meshing
operation [LMESH, AMESH, VMESH] on a solid model that contains
the associated lines.
This command is valid in any processor.
"""
command = "NSLL,%s,%s" % (str(type_), str(nkey))
return self.run(command, **kwargs)
def nslv(self, type_="", nkey="", **kwargs):
"""Selects those nodes associated with the selected volumes.
APDL Command: NSLV
Parameters
----------
type\_
Label identifying the type of node select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
nkey
Specifies whether only interior volume nodes are to be selected:
0 - Select only nodes interior to selected volumes.
1 - Select all nodes (interior to volume, interior to areas, interior to lines, and
at keypoints) associated with the selected volumes.
Notes
-----
Valid only if the nodes were generated by a volume meshing operation
[VMESH] on a solid model that contains the selected volumes.
This command is valid in any processor.
"""
command = "NSLV,%s,%s" % (str(type_), str(nkey))
return self.run(command, **kwargs)
def partsel(self, type_="", pmin="", pmax="", pinc="", **kwargs):
"""Selects a subset of parts in an explicit dynamic analysis.
APDL Command: PARTSEL
Parameters
----------
type\_
Label identifying type of select. Because PARTSEL is a command
macro, the label must be enclosed in single quotes.
'S' - Select a new set (default).
'R' - Reselect a set from the current set.
'A' - Additionally select a set and extend the current set.
'U' - Unselect a set from the current set.
'ALL' - Select all parts.
'NONE' - Unselect all parts.
'INVE' - Invert the current selected set.
Notes
-----
PARTSEL invokes an ANSYS macro that selects parts in an explicit
dynamic analysis. When PARTSEL is executed, an element component is
automatically created for each existing part. For example, the elements
that make up PART 1 are grouped into the element component
_PART1. Each time the PARTSEL command is executed, components
for unselected parts will be unselected. To plot selected parts,
choose Utility Menu> Plot> Parts in the GUI or issue the
command PARTSEL,'PLOT'.
After selecting parts, if you change the selected set of nodes or
elements and then plot parts, the nodes and elements associated with
the previously selected parts (from the last PARTSEL command) will
become the currently selected set.
Note: A more efficient way to select and plot parts is to use the
ESEL (with ITEM = PART) and EPLOT commands. We recommend using ESEL
instead of PARTSEL since PARTSEL will be phased out in a future
release. Note that the menu path mentioned above for plotting parts
does not work with the ESEL command; use Utility Menu> Plot> Elements
instead.
In an explicit dynamic small restart analysis (EDSTART,2), PARTSEL can
be used to unselect a part during the solution even if it is referenced
in some way (such as in a contact definition). (Note that ESEL cannot
be used for this purpose.) However, in a new analysis or a full restart
analysis (EDSTART,3), all parts that are used in some type of
definition must be selected at the time of solution.
This command is valid in any processor.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = "PARTSEL,%s,%s,%s,%s" % (
str(type_), str(pmin), str(pmax), str(pinc))
return self.run(command, **kwargs)
def vsel(self, type_="", item="", comp="", vmin="", vmax="", vinc="",
kswp="", **kwargs):
"""Selects a subset of volumes.
APDL Command: VSEL
Parameters
----------
type\_
Label identifying the type of volume select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
ALL - Restore the full set.
NONE - Unselect the full set.
INVE - Invert the current set (selected becomes unselected and vice versa).
STAT - Display the current select status.
Notes
-----
Selects volumes based on values of a labeled item and component. For
example, to select a new set of volumes based on volume numbers 1
through 7, use VSEL,S,VOLU,,1,7. The subset is used when the ALL label
is entered (or implied) on other commands, such as VLIST,ALL. Only
data identified by volume number are selected. Data are flagged as
selected and unselected; no data are actually deleted from the
database.
This command is valid in any processor.
For Selects based on non-integer numbers (coordinates, results, etc.),
items that are within the range VMIN-Toler and VMAX+Toler are selected.
The default tolerance Toler is based on the relative values of VMIN and
VMAX as follows:
If VMIN = VMAX, Toler = 0.005 x VMIN.
If VMIN = VMAX = 0.0, Toler = 1.0E-6.
If VMAX ≠ VMIN, Toler = 1.0E-8 x (VMAX-VMIN).
Use the SELTOL command to override this default and specify Toler
explicitly.
Table 251: VSEL - Valid Item and Component Labels
"""
command = "VSEL,%s,%s,%s,%s,%s,%s,%s" % (str(type_), str(
item), str(comp), str(vmin), str(vmax), str(vinc), str(kswp))
return self.run(command, **kwargs)
def vsla(self, type_="", vlkey="", **kwargs):
"""Selects those volumes containing the selected areas.
APDL Command: VSLA
Parameters
----------
type\_
Label identifying the type of volume select:
S - Select a new set (default).
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
vlkey
Specifies whether all contained volume areas must be selected
[ASEL]:
0 - Select volume if any of its areas are in the selected area set.
1 - Select volume only if all of its areas are in the selected area set.
Notes
-----
This command is valid in any processor.
"""
command = "VSLA,%s,%s" % (str(type_), str(vlkey))
return self.run(command, **kwargs)
def seltol(self, toler="", **kwargs):
"""Select those volumes containing the selected areas.
APDL Command: SELTOL
Parameters
----------
toler
Tolerance value. If blank, restores the default tolerance
logic.
Notes
-----
For selects based on non-integer numbers (e.g. coordinates,
results, etc.), items within the range VMIN - Toler and VMAX +
Toler are selected, where VMIN and VMAX are the range values input
on the xSEL commands (ASEL, ESEL, KSEL, LSEL, NSEL, and VSEL).
The default tolerance logic is based on the relative values of
VMIN and VMAX as follows:
If ``vmin == vmax``, ``toler = 0.005*vmin``
If ``vmin == vmax == 0.0``, ``toler = 1.0E-6``.
If ``vmax != vmin``, ``toler = 1.0E-8 x (vmax - vmin)``.
This command is typically used when ``vmax - vmin`` is very large, so
that the computed default tolerance is also large and the
xSEL commands select more than is desired.
Toler remains active until respecified by a subsequent seltol
command. ``seltol()`` resets back to the default toler value.
Examples
--------
Set the selection tolerance to 1E-5.
>>> mapdl.seltol(1E-5)
"""
return self.run(f"SELTOL{toler}", **kwargs)
```
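A hedged end-to-end sketch of how these selection wrappers are typically chained. It assumes a local MAPDL installation; `launch_mapdl`, `prep7`, `block`, `et`, `esize`, and `vmesh` are standard pymapdl wrappers defined outside this module:
```python
from ansys.mapdl.core import launch_mapdl

mapdl = launch_mapdl()
mapdl.prep7()
mapdl.block(0, 1, 0, 1, 0, 1)      # unit cube volume
mapdl.et(1, "SOLID185")
mapdl.esize(0.25)
mapdl.vmesh("ALL")

mapdl.seltol(1e-6)                 # tighten the location-pick tolerance
mapdl.nsel("S", "LOC", "X", 0)     # nodes on the x == 0 face
mapdl.esln("S", 0)                 # elements touching any selected node
mapdl.nsle("S")                    # back to all nodes of those elements
mapdl.allsel()                     # restore the full sets
mapdl.exit()
```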
#### File: _commands/post1_/load_case.py
```python
class LoadCase:
def lcabs(self, lcno="", kabs="", **kwargs):
"""Specifies absolute values for load case operations.
APDL Command: LCABS
Parameters
----------
lcno
Load case pointer number. If ALL, apply to all selected load cases
[LCSEL].
kabs
Absolute value key:
0 - Use algebraic values of load case LCNO in operations.
1 - Use absolute values of load case LCNO in operations.
Notes
-----
Causes absolute values to be used in the load case operations [LCASE or
LCOPER]. Absolute values are taken prior to assigning a load case
factor [LCFACT] and are applied only to defined load cases [LCDEF].
"""
command = f"LCABS,{lcno},{kabs}"
return self.run(command, **kwargs)
def lcase(self, lcno="", **kwargs):
"""Reads a load case into the database.
APDL Command: LCASE
Parameters
----------
lcno
Load case pointer number [LCDEF,STAT]. Defaults to 1.
Notes
-----
Reads a load case into the database. Load cases are created as
described on the LCDEF or LCWRITE commands. The results portion of the
database and the applied forces and displacements are cleared before
reading the data in. Absolute values [LCABS] and scale factors
[LCFACT] can be applied during the read operation.
"""
command = f"LCASE,{lcno}"
return self.run(command, **kwargs)
def lcdef(self, lcno="", lstep="", sbstep="", kimg="", **kwargs):
"""Creates a load case from a set of results on a results file.
APDL Command: LCDEF
Parameters
----------
lcno
Arbitrary pointer number (1-99) to be assigned to the load case
specified by LSTEP, SBSTEP and by the FILE command. Defaults to 1
+ previous value.
lstep
Load step number to be defined as the load case. Defaults to one.
sbstep
Substep number. Defaults to the last substep of the load step.
kimg
Used only with results from complex analyses:
0 - Use real part of complex solution
1 - Use imaginary part.
Notes
-----
Creates a load case by establishing a pointer to a set of results on a
results file (written during the ANSYS solution phase). This pointer
(LCNO) can then be used on the LCASE or LCOPER commands to read the
load case data into the database.
Issue LCDEF,ERASE to delete all load case pointers (and all load case
files, if any). Issue LCDEF,LCNO,ERASE to delete only the specific
load case pointer LCNO (and its file, if any). With the ERASE options,
all pointers are deleted; however only files with the default extension
[LCWRITE] are deleted. Issue LCDEF,STAT for status of all selected
load cases [LCSEL], or LCDEF,STAT,ALL for status of all load cases.
The STAT command may be used to list all load cases. See also LCFILE
to establish a pointer to a set of results on a load case file (written
by LCWRITE). Harmonic element data read from a result file load case is
stored at the zero-degree position.
"""
command = f"LCDEF,{lcno},{lstep},{sbstep},{kimg}"
return self.run(command, **kwargs)
def lcfact(self, lcno="", fact="", **kwargs):
"""Defines scale factors for load case operations.
APDL Command: LCFACT
Parameters
----------
lcno
Load case pointer number. If ALL, apply to all selected load cases
[LCSEL].
fact
Scale factor applied to load case LCNO. Blank defaults to 1.0.
Notes
-----
Defines scale factors to be used in the load case operations [LCASE or
LCOPER]. Scale factors are applied after an absolute value operation
[LCABS] and are applied only to defined load cases [LCDEF].
"""
command = f"LCFACT,{lcno},{fact}"
return self.run(command, **kwargs)
def lcfile(self, lcno="", fname="", ext="", **kwargs):
"""Creates a load case from an existing load case file.
APDL Command: LCFILE
Parameters
----------
lcno
Arbitrary (1-99) pointer number assigned to this load case.
fname
File name and directory path (248 characters maximum, including the
characters needed for the directory path). An unspecified
directory path defaults to the working directory; in this case, you
can use all 248 characters for the file name.
ext
Filename extension (eight-character maximum).
Notes
-----
Creates a load case by establishing a pointer to an existing load case
file [LCWRITE]. This pointer (LCNO) can then be used on the LCASE or
LCOPER commands to read the load case data into the database. This
command is typically used to reestablish load case pointers in a new
ANSYS session (pointers are not saved on the database file), or when
more than one pointer to a single load case is desired. See the LCDEF
command for status and erase operations. See also LCDEF to establish a
pointer to a set of results on a results file (written during the ANSYS
solution phase).
"""
command = f"LCFILE,{lcno},{fname},{ext}"
return self.run(command, **kwargs)
def lcoper(self, oper="", lcase1="", oper2="", lcase2="", **kwargs):
"""Performs load case operations.
APDL Command: LCOPER
Parameters
----------
oper
Valid operations are:
ZERO - Zero results portion of database (LCASE1 ignored).
SQUA - Square database values (LCASE1 ignored).
SQRT - Square root of database (absolute) values (LCASE1 ignored).
LPRIN - Recalculate line element principal stresses (LCASE1 ignored). Stresses are as
shown for the NMISC items of the ETABLE command for the
specific line element type.
ADD - Add LCASE1 to database values.
SUB - Subtract LCASE1 from database values.
SRSS - Square root of the sum of the squares of database and LCASE1.
MIN - Compare and save in database the algebraic minimum of database and LCASE1.
MAX - Compare and save in database the algebraic maximum of database and LCASE1.
ABMN - Compare and save in database the absolute minimum of database and LCASE1 (based
on magnitudes, then apply the corresponding sign).
ABMX - Compare and save in database the absolute maximum of database and LCASE1 (based
on magnitudes, then apply the corresponding sign).
lcase1
First load case in the operation (if any). See LCNO of the LCDEF
command. If ALL, repeat operations using all selected load cases
[LCSEL].
oper2
Valid operations are:
MULT - Multiplication: LCASE1*LCASE2
CPXMAX - This option does a phase angle sweep to calculate the maximum of derived
stresses and equivalent strain for a complex solution
where LCASE1 is the real part and LCASE2 is the imaginary
part. The Oper field is not applicable with this option.
Also, the LCABS and SUMTYPE commands have no effect on
this option. The value of S3 will be a minimum. This
option does not apply to derived displacement amplitude
(USUM). Load case writing (LCWRITE) is not supported. See
POST1 and POST26 – Complex Results Postprocessing in the
Mechanical APDL Theory Reference for more information.
lcase2
Second load case. Used only with Oper2 operations.
Notes
-----
LCOPER operates on the database and one or two load cases according to:
Database = Database Oper (LCASE1 Oper2 LCASE2)
where operations Oper and Oper2 are as described above. Absolute
values and scale factors may be applied to the load cases before the
operations [LCABS, LCFACT]. If LCASE1 is not specified, only operation
Oper is performed on the current database. If LCASE2 is specified,
operation Oper2 will be performed before operation Oper. If LCASE2 is
not specified, operation Oper2 is ignored. Solution items not
contained [OUTRES] in either the database or the applicable load cases
will result in a null item during a load case operation. Harmonic
element data read from a result file load case are processed at zero
degrees. All load case combinations are performed in the solution
coordinate system, and the data resulting from load case combinations
are stored in the solution coordinate system. The resultant data are
then transformed to the active results coordinate system [RSYS] when
listed or displayed. Except in the cases of Oper = LPRIN, ADD, or SUB,
you must use RSYS,SOLU to list or display, and in the case of layered
elements, the layer (LAYER) must also be specified.
Use the FORCE command prior to any combination operation to correctly
combine the requested force type.
If Oper2=CPXMAX, the derived stresses and strain calculation do not
apply to line elements.
"""
command = f"LCOPER,{oper},{lcase1},{oper2},{lcase2}"
return self.run(command, **kwargs)
def lcsel(self, type_="", lcmin="", lcmax="", lcinc="", **kwargs):
"""Selects a subset of load cases.
APDL Command: LCSEL
Parameters
----------
type\_
Label identifying the type of select:
S - Select a new set.
R - Reselect a set from the current set.
A - Additionally select a set and extend the current set.
U - Unselect a set from the current set.
ALL - Restore the full set.
NONE - Unselect the full set.
INVE - Invert the current set (selected becomes unselected and vice versa).
STAT - Display the current select status.
lcmin
Minimum value of load case pointer range.
lcmax
Maximum value of load case pointer range. LCMAX defaults to LCMIN.
lcinc
Value increment within range. Defaults to 1. LCINC cannot be
negative.
Notes
-----
Selects a subset of load cases for other operations. For example, to
select a new set of load cases based on load cases 1 through 7, use
LCSEL,S,1,7. The subset is used when the ALL label is entered (or
implied) on other commands, such as LCFACT, LCABS, LCOPER, etc. Load
cases are flagged as selected and unselected; no load case pointers
[LCDEF, LCWRITE, LCFILE] are actually deleted from the database.
"""
command = f"LCSEL,{type_},{lcmin},{lcmax},{lcinc}"
return self.run(command, **kwargs)
def lcsum(self, lab="", **kwargs):
"""Specifies whether to process non-summable items in load case
APDL Command: LCSUM
operations.
Parameters
----------
lab
Combination option
(blank) - Only combine summable items [default].
ALL - Combine all items including non summable items.
Notes
-----
Allows non-summable items (e.g. plastic strains) to be included in load
combinations. Issue LCSUM,ALL before the first load case operation
(LCXX command). May also be used to include nonsummable items in the
appending of a results file (RAPPND command).
"""
command = f"LCSUM,{lab}"
return self.run(command, **kwargs)
def lcwrite(self, lcno="", fname="", ext="", **kwargs):
"""Creates a load case by writing results to a load case file.
APDL Command: LCWRITE
Parameters
----------
lcno
Arbitrary pointer number (1-99) to be assigned to this load case.
fname
File name and directory path (248 characters maximum, including the
characters needed for the directory path). An unspecified
directory path defaults to the working directory; in this case, you
can use all 248 characters for the file name.
ext
Filename extension (eight-character maximum).
Notes
-----
Creates a load case by writing the results data in the database to a
load case file. The database remains unchanged by this operation. A
pointer is also established to the written set of results on the load
case file. This pointer (LCNO) can then be used on the LCASE or LCOPER
commands to read the load case data into the database. By default,
only summable results data (such as displacements, stresses, elastic
strains) and constant results data (such as volume) are written to the
load case file unless requested (LCSUM command). Non-summable results
data (such as plastic strains, strain energy), boundary conditions, and
nodal loads are not written to the load case file. The load case file
may be named by default or by a user name. Rewriting to the same file
overwrites the previous data. See the LCDEF command for status and
erase operations.
"""
command = f"LCWRITE,{lcno},{fname},{ext}"
return self.run(command, **kwargs)
def lczero(self, **kwargs):
"""Zeroes the results portion of the database.
APDL Command: LCZERO
Notes
-----
Often used before the LCOPER command. Same as LCOPER,ZERO.
"""
command = f"LCZERO,"
return self.run(command, **kwargs)
def rappnd(self, lstep="", time="", **kwargs):
"""Appends results data from the database to the results file.
APDL Command: RAPPND
Parameters
----------
lstep
Load step number to be assigned to the results data set. If it is
the same as an existing load step number on the results file, the
appended load step will be inaccessible. Defaults to 1.
time
Time value to be assigned to the results data set. Defaults to
0.0. A time value greater than the last load step should be used.
Notes
-----
This command is typically used to append the results from a load case
combination to the results file. See the LCWRITE command to create a
separate load case file. Only summable and constant data are written
to the results file by default; non-summable data are not written
unless requested (LCSUM command). RAPPND should not be used to append
results from a harmonic analysis.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
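Examples
--------
A hedged sketch (the load step number and time value are arbitrary):
append the current database results as load step 10 at time 10.0.
>>> mapdl.rappnd(10, 10.0)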
"""
command = f"RAPPND,{lstep},{time}"
return self.run(command, **kwargs)
```
#### File: _commands/post26_/listing.py
```python
class Listing:
def extrem(self, nvar1="", nvar2="", ninc="", **kwargs):
"""Lists the extreme values for variables.
APDL Command: EXTREM
Parameters
----------
nvar1, nvar2, ninc
List extremes for variables NVAR1 through NVAR2 in steps of NINC.
Variable range defaults to its maximum. NINC defaults to 1.
Notes
-----
Lists the extreme values (and the corresponding times) for stored and
calculated variables. Extremes for stored variables are automatically
listed as they are stored. Only the real part of a complex number is
used. Extreme values may also be assigned to parameters [*GET].
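Examples
--------
A minimal sketch (assuming variables 2 through 5 have been defined and
stored): list their extreme values.
>>> mapdl.post26()
>>> mapdl.extrem(2, 5)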
"""
command = f"EXTREM,{nvar1},{nvar2},{ninc}"
return self.run(command, **kwargs)
def lines(self, n="", **kwargs):
"""Specifies the length of a printed page.
APDL Command: LINES
Parameters
----------
n
Number of lines per page (defaults to 20). (Minimum allowed = 11).
Notes
-----
Specifies the length of a printed page (for use in reports, etc.).
"""
command = f"LINES,{n}"
return self.run(command, **kwargs)
def nprint(self, n="", **kwargs):
"""Defines which time points stored are to be listed.
APDL Command: NPRINT
Parameters
----------
n
List data associated with every N time (or frequency) point(s),
beginning with the first point stored (defaults to 1).
Notes
-----
Defines which time (or frequency) points within the range stored are to
be listed.
"""
command = f"NPRINT,{n}"
return self.run(command, **kwargs)
def prcplx(self, key="", **kwargs):
"""Defines the output form for complex variables.
APDL Command: PRCPLX
Parameters
----------
key
Output form key:
0 - Real and imaginary parts.
1 - Amplitude and phase angle. Stored real and imaginary data are converted to
amplitude and phase angle upon output. Data remain stored as
real and imaginary parts.
Notes
-----
Defines the output form for complex variables. Used only with harmonic
analyses (ANTYPE,HARMIC).
All results data are stored in the form of real and imaginary
components and converted to amplitude and/or phase angle as specified
via the PRCPLX command. The conversion is not valid for derived
results (such as principal stress/strain, equivalent stress/strain and
USUM).
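Examples
--------
A hedged sketch (assuming variable 2 holds harmonic results): list it
as amplitude and phase angle.
>>> mapdl.prcplx(1)
>>> mapdl.prvar(2)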
"""
command = f"PRCPLX,{key}"
return self.run(command, **kwargs)
def prtime(self, tmin="", tmax="", **kwargs):
"""Defines the time range for which data are to be listed.
APDL Command: PRTIME
Parameters
----------
tmin
Minimum time (defaults to the first point stored).
tmax
Maximum time (defaults to the last point stored).
Notes
-----
Defines the time (or frequency) range (within the range stored) for
which data are to be listed.
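Examples
--------
A minimal sketch (the time values are arbitrary): list only data
between 1.0 and 2.0 seconds.
>>> mapdl.prtime(1.0, 2.0)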
"""
command = f"PRTIME,{tmin},{tmax}"
return self.run(command, **kwargs)
def prvar(self, nvar1="", nvar2="", nvar3="", nvar4="", nvar5="", nvar6="",
**kwargs):
"""Lists variables vs. time (or frequency).
APDL Command: PRVAR
Parameters
----------
nvar1, nvar2, nvar3, . . . , nvar6
Variables to be displayed, defined either by the reference number
or a unique thirty-two character name. If duplicate names are used,
the command will print the data for the lowest-numbered variable
with that name.
Notes
-----
Lists variables vs. time (or frequency). Up to six variables may be
listed across the line. Time column output format can be changed using
the /FORMAT command arguments Ftype, NWIDTH, and DSIGNF.
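Examples
--------
A minimal sketch (assuming variables 2 and 3 were defined with NSOL or
ESOL and stored):
>>> mapdl.prvar(2, 3)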
"""
command = f"PRVAR,{nvar1},{nvar2},{nvar3},{nvar4},{nvar5},{nvar6}"
return self.run(command, **kwargs)
```
#### File: _commands/post26_/setup.py
```python
from typing import Optional, Union
from ansys.mapdl.core.mapdl_types import MapdlInt, MapdlFloat
class Setup:
def ansol(self, nvar="", node="", item="", comp="", name="", mat="",
real="", ename="", **kwargs):
"""Specifies averaged nodal data to be stored from the results file in the
APDL Command: ANSOL
solution coordinate system.
Parameters
----------
nvar
Arbitrary reference number assigned to this variable (2 to NV
[NUMVAR]). Overwrites any existing results for this variable.
node
Node number for which data are to be stored.
item
Label identifying the item. General item labels are shown in
Table 126: ANSOL - General Item and Component Labels below. Some
items also require a component label.
comp
Component of the item (if required). General component labels are
shown in Table 126: ANSOL - General Item and Component Labels
below.
name
Thirty-two character name for identifying the item on the printout
and displays. Defaults to an eight character label formed by
concatenating the first four characters of the Item and Comp
labels.
mat
The material number. Average will be computed based on the subset
of elements with the specified material number. DEFAULT: Use all
elements in the active set unless Real and/or Ename is specified.
real
The real number. Average will be computed based on the subset of
elements with the specified real number. DEFAULT: Use all elements
in the active set unless Mat and/or Ename is specified.
ename
The element type name. Average will be computed based on the subset
of elements with the specified element type name. DEFAULT: Use all
elements in the active set unless Mat and/or Real is specified.
Notes
-----
Valid item and component labels for averaged nodal results are listed
in Table 126: ANSOL - General Item and Component Labels, below.
All element nodal quantities are obtained in RSYS, Solu and then
averaged.
The ANSOL command defines averaged nodal results data to be stored from
a results file [FILE]. Not all items are valid for all nodes. See the
input and output summary tables of the Element Reference of each
element that is attached to the node for the available items.
COORDINATE SYSTEMS: All element nodal results used by ANSOL for
averaging are in the element coordinate system, except for layered
elements. Layered element results are in the layer coordinate system.
You can further specify the element nodal results, for some elements,
with the SHELL, LAYERP26, and FORCE commands.
ANSOL does not transform results from RSYS, SOLU to other coordinate
systems. Verify that all elements attached to the subject node have the
same coordinate system before using ANSOL.
SHELL ELEMENTS: The default shell element coordinate system is based on
node ordering. For shell elements the adjacent elements could have a
different RSYS,SOLU, making the resultant averaged data inconsistent. A
note to this effect is issued when ANSOL is used in models containing
shell elements. Ensure that consistent coordinate systems are active
for all associated elements used by the ANSOL command.
DERIVED QUANTITIES: Some of the result items supported by ANSOL (see
Table 126: ANSOL - General Item and Component Labels) are derived
from the component quantities. Use AVPRIN to specify the principal and
vector sum quantity averaging methods.
DEFAULT: If Mat, Real , and Ename are not specified, all of the
elements attached to the node will be considered. When a material ID,
real constant ID, or element type discontinuity is detected at a node,
a note is issued. For example, in an FSI analysis, a FLUID30 element at
the structure interface would be considered. But since it contains no
SX result, it will not be used during STORE operations.
Table 126: ANSOL - General Item and Component Labels
For more information on the meaning of contact status and its possible
values, see Reviewing Results in POST1 in the Contact Technology Guide.
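Examples
--------
A hedged sketch (the node number and variable name are arbitrary):
store the averaged X-direction stress at node 25 as variable 2.
>>> mapdl.post26()
>>> mapdl.ansol(2, 25, 'S', 'X', 'SX_N25')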
"""
command = f"ANSOL,{nvar},{node},{item},{comp},{name},{mat},{real},{ename}"
return self.run(command, **kwargs)
def cisol(self, n="", id_="", node="", cont="", dtype="", **kwargs):
"""Stores fracture parameter information in a variable.
APDL Command: CISOL
Parameters
----------
n
Arbitrary reference number or name assigned to this variable.
Number must be > 1 but <= NUMVAR.
id_
Crack ID number.
node
Crack tip node number.
cont
Contour number.
dtype
Data type to output:
JINT - J-integral
IIN1 - Interaction integral 1
IIN2 - Interaction integral 2
IIN3 - Interaction integral 3
K1 - Mode 1 stress-intensity factor
K2 - Mode 2 stress-intensity factor
K3 - Mode 3 stress-intensity factor
G1 - Mode 1 energy release rate
G2 - Mode 2 energy release rate
G3 - Mode 3 energy release rate
GT - Total energy release rate
MFTX - Total material force X
MFTY - Total material force Y
MFTZ - Total material force Z
CEXT - Crack extension
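Examples
--------
A hedged sketch (the crack ID, tip node, and contour numbers are
arbitrary): store the J-integral for crack 1, tip node 100, contour 2
as variable 4.
>>> mapdl.cisol(4, 1, 100, 2, 'JINT')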
"""
command = f"CISOL,{n},{id_},{node},{cont},{dtype}"
return self.run(command, **kwargs)
def data(self, ir="", lstrt="", lstop="", linc="", name="", kcplx="",
**kwargs):
"""Reads data records from a file into a variable.
APDL Command: DATA
Parameters
----------
ir
Arbitrary reference number assigned to the resulting variable (2 to
NV [NUMVAR]). If this number is the same as for a previously
defined variable, the previously defined variable will be
overwritten with this result.
lstrt
Start at location LSTRT (defaults to 1).
lstop
Stop at location LSTOP (defaults to LSTRT). Maximum location
available is determined from data previously stored.
linc
Fill every LINC location between LSTRT and LSTOP (defaults to 1).
name
Eight character name for identifying the variable on the printout
and displays. Embedded blanks are compressed upon output.
kcplx
Complex number key:
0 - Data stored as the real part of the complex number.
1 - Data stored as the imaginary part of the complex number.
Notes
-----
This command must be followed by a format statement (on the next line)
and the subsequent data records, and all must be on the same file (that
may then be read with the /INPUT command). The format specifies the
number of fields to be read per record, the field width, and the
placement of the decimal point (if one is not included in the data
value). The read operation follows the available FORTRAN FORMAT
conventions of the system. See the system FORTRAN manual for details.
Any standard FORTRAN real format (such as (4F6.0), (F2.0,2X,F12.0),
etc.) may be used. Integer (I), character (A), and list-directed (*)
descriptors may not be used. The parentheses must be included in the
format. Up to 80 columns per record may be read. Locations may be
filled within a range. Previous data in the range will be overwritten.
"""
command = f"DATA,{ir},{lstrt},{lstop},{linc},{name},{kcplx}"
return self.run(command, **kwargs)
def edread(self, nstart="", label="", num="", step1="", step2="",
**kwargs):
"""Reads explicit dynamics output into variables for time-history
APDL Command: EDREAD
postprocessing.
Parameters
----------
nstart
Starting reference number assigned to the first variable. Allowed
range is 2 (the default) to NV [NUMVAR]. (NV defaults to 30 for an
explicit dynamics analysis.)
label
Label identifying the output file to be read. No default.
GLSTAT - Read data from the GLSTAT file.
MATSUM - Read data from the MATSUM file.
SPCFORC - Read data from the SPCFORC file.
RCFORC - Read data from the RCFORC file.
SLEOUT - Read data from the SLEOUT file.
NODOUT - Read data from the NODOUT file.
RBDOUT - Read data from the RBDOUT file.
num
Number identifying the data set to be read in (defaults to 1). If
Label = GLSTAT, NUM is ignored. If Label = MATSUM or RBDOUT, NUM is
the PART number [EDPART] for which output is desired. If Label =
SPCFORC or NODOUT, NUM is the node number for which output is
desired. If Label = SLEOUT or RCFORC, NUM is the number of the
contact entity for which output is desired.
step1, step2
Load step range of data to be read in. If STEP1 and STEP2 are
blank, all load steps are read in.
Notes
-----
EDREAD reads data from the specified ASCII output file so that it may
be used during postprocessing. After EDREAD, you must issue the STORE
command to store the data in time history variables. Once stored, the
variables can be viewed as plots of output item versus time.
The number of variables stored depends on the file specified. The
following table shows the items in each file and the order in which
they are stored. If data items were previously stored in variables
NSTART to NSTART+15, they will be overwritten. If more variables are
needed, change NV on the NUMVAR command. (Note that hourglass energy
will not be available if it was not specified for the model
[EDENERGY,1].)
The following items under MATSUM are listed in the MATSUM ASCII file
(in the Mat no. field) for each part number at time intervals specified
by the EDHTIME command. Use EDREAD,,MATSUM,NUM to specify the part
number that corresponds to the mat number in the MATSUM file.
Resultant contact forces and sliding interface energies are available
from the RCFORC and SLEOUT files, respectively. The RCFORC file is
written for surface based contact types that include target and contact
(master and slave) definitions. You should ensure that this file
contains valid force results before issuing EDREAD,,RCFORC. Only the
resultant contact forces on the master surface are available for time-
history postprocessing.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
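Examples
--------
A hedged sketch (assuming the explicit run wrote a GLSTAT file): read
global statistics into variables starting at 2, then store them.
>>> mapdl.post26()
>>> mapdl.edread(2, 'GLSTAT')
>>> mapdl.store()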
"""
command = f"EDREAD,{nstart},{label},{num},{step1},{step2}"
return self.run(command, **kwargs)
def enersol(self, nvar="", item="", name="", **kwargs):
"""Specifies the total energies to be stored.
APDL Command: ENERSOL
Parameters
----------
nvar
Arbitrary reference number assigned to this variable (2 to NV).
"""
command = f"ENERSOL,{nvar},{item},{name}"
return self.run(command, **kwargs)
def esol(self, nvar: MapdlInt = "", elem: MapdlInt = "",
node: MapdlInt = "", item: str = "", comp: str = "",
name: str = "", **kwargs) -> Optional[str]:
"""Specify element data to be stored from the results file.
/POST26 APDL Command: ESOL
Parameters
----------
nvar
Arbitrary reference number assigned to this variable (2 to
NV [NUMVAR]). Overwrites any existing results for this
variable.
elem
Element for which data are to be stored.
node
Node number on this element for which data are to be
stored. If blank, store the average element value (except
for FMAG values, which are summed instead of averaged).
item
Label identifying the item. General item labels are shown
in Table 134: ESOL - General Item and Component Labels
below. Some items also require a component label.
comp
Component of the item (if required). General component
labels are shown in Table 134: ESOL - General Item and
Component Labels below. If Comp is a sequence number (n),
the NODE field will be ignored.
name
Thirty-two character name for identifying the item on the
printout and displays. Defaults to a label formed by
concatenating the first four characters of the Item and
Comp labels.
Examples
--------
Switch to the time-history postprocessor
>>> mapdl.post26()
Store the stress in the X direction for element 1 at node 1
>>> nvar = 2
>>> mapdl.esol(nvar, 1, 1, 'S', 'X')
Move the value to an array and access it via mapdl.parameters
>>> mapdl.dim('ARR', 'ARRAY', 1)
>>> mapdl.vget('ARR', nvar)
>>> mapdl.parameters['ARR']
array(-1991.40234375)
Notes
-----
See Table 134: ESOL - General Item and Component Labels for
a list of valid item and component labels for element (except
line element) results.
The ESOL command defines element results data to be stored
from a results file (FILE). Not all items are valid for all
elements. To see the available items for a given element,
refer to the input and output summary tables in the
documentation for that element.
Two methods of data access are available via the ESOL
command. You can access some simply by using a generic label
(component name method), while others require a label and
number (sequence number method).
Use the component name method to access general element data
(that is, element data generally available to most element
types or groups of element types).
The sequence number method is required for data that is not
averaged (such as pressures at nodes and temperatures at
integration points), or data that is not easily described in a
generic fashion (such as all derived data for structural line
elements and contact elements, all derived data for thermal
line elements, and layer data for layered elements).
Element results are in the element coordinate system, except
for layered elements where results are in the layer coordinate
system. Element forces and moments are in the nodal
coordinate system. Results are obtainable for an element at a
specified node. Further location specifications can be made
for some elements via the SHELL, LAYERP26, and FORCE commands.
For more information on the meaning of contact status and its
possible values, see Reviewing Results in POST1 in the Contact
Technology Guide.
"""
command = f"ESOL,{nvar},{elem},{node},{item},{comp},{name}"
return self.run(command, **kwargs)
def file(self, fname="", ext="", **kwargs):
"""Specifies the data file where results are to be found.
APDL Command: FILE
Parameters
----------
fname
File name and directory path (248 characters maximum, including the
characters needed for the directory path). An unspecified
directory path defaults to the working directory; in this case, you
can use all 248 characters for the file name.
ext
Filename extension (eight-character maximum).
Notes
-----
Specifies the ANSYS data file where the results are to be found for
postprocessing.
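Examples
--------
A minimal sketch (assuming 'model.rst' exists in the working
directory):
>>> mapdl.post26()
>>> mapdl.file('model', 'rst')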
"""
command = f"FILE,{fname},{ext}"
return self.run(command, **kwargs)
def gapf(self, nvar="", num="", name="", **kwargs):
"""Defines the gap force data to be stored in a variable.
APDL Command: GAPF
Parameters
----------
nvar
Arbitrary reference number assigned to this variable (2 to NV
[NUMVAR]). Overwrites any existing results for this variable.
num
Number identifying gap number for which the gap force is to be
stored. Issue the GPLIST command to display gap numbers.
name
Thirty-two character name for identifying the item on the printout
and displays (defaults to the name GAPF).
Notes
-----
Defines the gap force data to be stored in a variable. Applicable only
to the expansion pass of the mode-superposition linear transient
dynamic (ANTYPE,TRANS) analysis. The data is usually on Fname.RDSP.
"""
command = f"GAPF,{nvar},{num},{name}"
return self.run(command, **kwargs)
def gssol(self, nvar="", item="", comp="", name="", **kwargs):
"""Specifies which results to store from the results file when using
APDL Command: GSSOL
generalized plane strain.
Parameters
----------
nvar
Arbitrary reference number or name assigned to this variable.
Variable numbers can be 2 to NV (NUMVAR) while the name can be an
eight byte character string. Overwrites any existing results for
this variable.
item
Label identifying item to be stored.
LENGTH - Change of fiber length at the ending point.
ROT - Rotation of the ending plane during deformation.
F - Reaction force at the ending point in the fiber direction.
M - Reaction moment applied on the ending plane.
comp
Component of the item, if Item = ROT or M.
X - The rotation angle or reaction moment of the ending plane about X.
Y - The rotation angle or reaction moment of the ending plane about Y.
name
Thirty-two character name identifying the item on the printout and
display. Defaults to the label formed by concatenating the first
four characters of the Item and Comp labels.
Notes
-----
This command stores the results (new position of the ending plane after
deformation) for generalized plane strain. All outputs are in the
global Cartesian coordinate system. For more information about the
generalized plane strain feature, see Generalized Plane Strain Option
of Current-Technology Solid Elements in the Element Reference.
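Examples
--------
A hedged sketch: store the rotation of the ending plane about X as
variable 2.
>>> mapdl.gssol(2, 'ROT', 'X')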
"""
command = f"GSSOL,{nvar},{item},{comp},{name}"
return self.run(command, **kwargs)
def jsol(self, nvar="", elem="", item="", comp="", name="", **kwargs):
"""Specifies result items to be stored for the joint element.
APDL Command: JSOL
Parameters
----------
nvar
Arbitrary reference number or name assigned to this variable.
Variable numbers can be 2 to NV (NUMVAR) while the name can be an
eight-byte character string. Overwrites any existing results for
this variable.
elem
Element number for which to store results.
item
Label identifying the item. Valid item labels are shown in
Table 202: JSOL - Valid Item and Component Labels below.
comp
Component of the Item (if required). Valid component labels are
shown in Table 202: JSOL - Valid Item and Component Labels below.
name
Thirty-two character name identifying the item on printouts and
displays. Defaults to a label formed by concatenating the first
four characters of the Item and Comp labels.
Notes
-----
This command is valid for the MPC184 joint elements. The values stored
are for the free or unconstrained degrees of freedom of a joint
element. Relative reaction forces and moments are available only if
stiffness, damping, or friction is associated with the joint element.
Table 202: JSOL - Valid Item and Component Labels
"""
command = f"JSOL,{nvar},{elem},{item},{comp},{name}"
return self.run(command, **kwargs)
def nsol(self, nvar="", node="", item="", comp="", name="", sector="",
**kwargs):
"""Specifies nodal data to be stored from the results file.
APDL Command: NSOL
Parameters
----------
nvar
Arbitrary reference number or name assigned to this variable.
Variable numbers can be 2 to NV (NUMVAR) while the name can be an
eight byte character string. Overwrites any existing results for
this variable.
node
Node for which data are to be stored.
item
Label identifying the item. Valid item labels are shown in the
table below. Some items also require a component label.
comp
Component of the item (if required). Valid component labels are
shown in the table below.
name
Thirty-two character name identifying the item on printouts and
displays. Defaults to a label formed by concatenating the first
four characters of the Item and Comp labels.
sector
For a full harmonic cyclic symmetry solution, the sector number for
which the results from NODE are to be stored.
Notes
-----
Stores nodal degree of freedom and solution results in a variable. For
more information, see Data Interpreted in the Nodal Coordinate System
in the Modeling and Meshing Guide.
For SECTOR>1, the result is in the nodal coordinate system of the base
sector, and it is rotated to the expanded sector’s location. Refer to
Using the /CYCEXPAND Command in the Cyclic Symmetry Analysis Guide for
more information.
Table 211: NSOL - Valid Item and Component Labels
Table 212: NSOL - Valid Item and Component Labels for ANSYS LS-DYNA
Nodal Results
For SHELL131 and SHELL132 elements with KEYOPT(3) = 0 or 1, use the
labels TBOT, TE2, TE3, . . ., TTOP instead of TEMP.
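Examples
--------
A minimal sketch (the node number and variable name are arbitrary):
store the UX displacement of node 1 as variable 2, then list it.
>>> mapdl.post26()
>>> mapdl.nsol(2, 1, 'U', 'X', 'UX_N1')
>>> mapdl.store('MERGE')
>>> mapdl.prvar(2)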
"""
command = f"NSOL,{nvar},{node},{item},{comp},{name},{sector}"
return self.run(command, **kwargs)
def nstore(self, tinc="", **kwargs):
"""Defines which time points are to be stored.
APDL Command: NSTORE
Parameters
----------
tinc
Store data associated with every TINC time (or frequency) point(s),
within the previously defined range of TMIN to TMAX [TIMERANGE].
(Defaults to 1)
Notes
-----
Defines which time (or frequency) points within the range are to be
stored.
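Examples
--------
A minimal sketch: store every second time point within the range
defined by TIMERANGE.
>>> mapdl.nstore(2)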
"""
command = f"NSTORE,{tinc}"
return self.run(command, **kwargs)
def numvar(self, nv="", **kwargs):
"""Specifies the number of variables allowed in POST26.
APDL Command: NUMVAR
Parameters
----------
nv
Allow storage for NV variables. 200 maximum are allowed. Defaults
to 10 (except for an explicit dynamics analysis, which defaults to
30). TIME (variable 1) should also be included in this number.
Notes
-----
Specifies the number of variables allowed for data read from the
results file and for data resulting from an operation (if any). For
efficiency, NV should not be larger than necessary. NV cannot be
changed after data storage begins.
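Examples
--------
A minimal sketch: allow storage for up to 200 POST26 variables.
>>> mapdl.post26()
>>> mapdl.numvar(200)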
"""
command = f"NUMVAR,{nv}"
return self.run(command, **kwargs)
def reset(self, **kwargs):
"""Resets all POST1 or POST26 specifications to initial defaults.
APDL Command: RESET
Notes
-----
Has the same effect as entering the processor the first time within the
run. In POST1, resets all specifications to initial defaults, erases
all element table items, path table data, fatigue table data, and load
case pointers. In POST26, resets all specifications to initial
defaults, erases all variables defined, and zeroes the data storage
space.
"""
command = f"RESET,"
return self.run(command, **kwargs)
def rforce(self, nvar="", node="", item="", comp="", name="", **kwargs):
"""Specifies the total reaction force data to be stored.
APDL Command: RFORCE
Parameters
----------
nvar
Arbitrary reference number assigned to this variable (2 to NV
[NUMVAR]). Overwrites any existing results for this variable.
node
Node for which data are to be stored. If NODE = P, graphical
picking is enabled (valid only in the GUI).
item
Label identifying the item. Valid item labels are shown in the
table below. Some items also require a component label.
comp
Component of the item (if required). Valid component labels are
shown in the table below.
name
Thirty-two character name identifying the item on printouts and
displays. Defaults to an eight character label formed by
concatenating the first four characters of the Item and Comp
labels.
Notes
-----
Defines the total reaction force data (static, damping, and inertial
components) to be stored from single pass (ANTYPE,STATIC or TRANS)
solutions or from the expansion pass of mode-superposition
(ANTYPE,HARMIC or TRANS) solutions.
Table 228: RFORCE - Valid Item and Component Labels
For SHELL131 and SHELL132 elements with KEYOPT(3) = 0 or 1, use the
labels HBOT, HE2, HE3, . . ., HTOP instead of HEAT.
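Examples
--------
A hedged sketch (the node number is arbitrary): store the Y-direction
reaction force at node 1 as variable 3.
>>> mapdl.rforce(3, 1, 'F', 'Y')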
"""
command = f"RFORCE,{nvar},{node},{item},{comp},{name}"
return self.run(command, **kwargs)
def rgb(self, kywrd="", pred="", pgrn="", pblu="", n1="", n2="", ninc="",
ncntr="", **kwargs):
"""Specifies the RGB color values for indices and contours.
APDL Command: /RGB
Parameters
----------
kywrd
Determines how RGB modifications will be applied.
INDEX - Specifies that subsequent color values apply to ANSYS color indices (0-15).
CNTR - Specifies that subsequent color values apply to contours (1-128). Applies to
C-option devices only (i.e. X11C or Win32C).
pred
Intensity of the color red, expressed as a percentage.
pgrn
Intensity of the color green, expressed as a percentage.
pblu
Intensity of the color blue, expressed as a percentage.
n1
First index (0-15), or contour (1-128) to which the designated RGB
values apply.
n2
Final index (0-15), or contour (1-128) to which the designated RGB
values apply.
ninc
The step increment between the values N1 and N2 determining which
contours or indices will be controlled by the specified RGB values.
ncntr
The new maximum number of contours (1-128).
Notes
-----
Issuing the /CMAP command (with no filename) will restore the default
color settings.
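Examples
--------
A hedged sketch: set color index 0 to white, a common way to obtain a
white plot background.
>>> mapdl.rgb('INDEX', 100, 100, 100, 0)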
"""
command = f"/RGB,{kywrd},{pred},{pgrn},{pblu},{n1},{n2},{ninc},{ncntr}"
return self.run(command, **kwargs)
def solu(self, nvar="", item="", comp="", name="", **kwargs):
"""Specifies solution summary data per substep to be stored.
APDL Command: SOLU
Parameters
----------
nvar
Arbitrary reference number assigned to this variable (2 to NV
[NUMVAR]).
item
Label identifying the item. Valid item labels are shown in the
table below. Some items may also require a component label.
comp
Component of the item (if required). Valid component labels are
shown in the table below. None are currently required.
name
Thirty-two character name identifying the item on printouts and
displays. Defaults to an eight character label formed by
concatenating the first four characters of the Item and Comp
labels.
Notes
-----
See also the PRITER command of POST1 to display some of these items
directly. Valid for a static or full transient analysis. All other
analyses have zeros for the data. Valid item and component labels for
solution summary values are:
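Examples
--------
A hedged sketch (CNVG is assumed to be a valid item label for the
analysis at hand; consult the table in the documentation): store the
convergence indicator as variable 2.
>>> mapdl.solu(2, 'CNVG')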
"""
command = f"SOLU,{nvar},{item},{comp},{name}"
return self.run(command, **kwargs)
def store(self, lab="", npts="", **kwargs):
"""Stores data in the database for the defined variables.
APDL Command: STORE
Parameters
----------
lab
Valid labels:
MERGE - Merge data from results file for the time points in memory with the existing
data using current specifications (default).
NEW - Store a new set of data, replacing any previously stored data with current
result file specifications and deleting any previously-
calculated (OPER) variables. Variables defined using the
ANSOL command are also deleted.
APPEN - Append data from results file to the existing data.
ALLOC - Allocate (and zero) space for NPTS data points.
PSD - Create a new set of frequency points for PSD calculations (replacing any
previously stored data and erasing any previously calculated
data).
npts
The number of time points (or frequency points) for storage (used
only with Lab = ALLOC or PSD). The value may be input when using
POST26 with data supplied from other than a results file. This
value is automatically determined from the results file data with
the NEW, APPEN, and MERGE options. For the PSD option, NPTS
determines the resolution of the frequency vector (valid numbers
are between 1 and 10, defaults to 5).
Notes
-----
This command stores data from the results file in the database for the
defined variables [NSOL, ESOL, SOLU, JSOL] per specification [FORCE,
LAYERP26, SHELL]. See the Basic Analysis Guide for more information.
The STORE,PSD command will create a new frequency vector (variable 1)
for response PSD calculations [RPSD]. This command should first be
issued before defining variables [NSOL, ESOL, RFORCE] for which
response PSD's are to be calculated.
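Examples
--------
A minimal sketch: define a nodal variable, then store the data for all
defined variables.
>>> mapdl.nsol(2, 1, 'U', 'X')
>>> mapdl.store('MERGE')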
"""
command = f"STORE,{lab},{npts}"
return self.run(command, **kwargs)
def timerange(self, tmin="", tmax="", **kwargs):
"""Specifies the time range for which data are to be stored.
APDL Command: TIMERANGE
Parameters
----------
tmin
Minimum time (defaults to first time (or frequency) point on the
file).
tmax
Maximum time (defaults to last time (or frequency) point on the
file).
Notes
-----
Defines the time (or frequency) range for which data are to be read
from the file and stored in memory. Use the NSTORE command to define
the time increment.
Use PRTIME or PLTIME to specify the time (frequency) range for cyclic
mode-superposition harmonic analyses.
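Examples
--------
A minimal sketch (the time values are arbitrary): read and store only
data between 1.0 and 2.0 seconds.
>>> mapdl.timerange(1.0, 2.0)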
"""
command = f"TIMERANGE,{tmin},{tmax}"
return self.run(command, **kwargs)
def vardel(self, nvar="", **kwargs):
"""Deletes a variable (GUI).
APDL Command: VARDEL
Parameters
----------
nvar
The reference number of the variable to be deleted. NVAR is as
defined by NSOL, ESOL, etc.
Notes
-----
Deletes a POST26 solution results variable. This is a command
generated by the Graphical User Interface (GUI). It will appear in the
log file (Jobname.LOG) if a POST26 variable is deleted from the
"Defined Time-History Variables" dialog box. This command is not
intended to be typed in directly in an ANSYS session (although it can
be included in an input file for batch input or for use with the /INPUT
command).
"""
command = f"VARDEL,{nvar}"
return self.run(command, **kwargs)
def varnam(self, ir="", name="", **kwargs):
"""Names (or renames) a variable.
APDL Command: VARNAM
Parameters
----------
ir
Reference number of the variable (2 to NV [NUMVAR]).
name
Thirty-two character name for identifying variable on printouts and
displays. Embedded blanks are compressed for output.
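Examples
--------
A minimal sketch (the variable name is arbitrary): rename variable 2.
>>> mapdl.varnam(2, 'UX_TIP')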
"""
command = f"VARNAM,{ir},{name}"
return self.run(command, **kwargs)
```
#### File: _commands/preproc/constraint_equations.py
```python
class ConstraintEquations:
def ce(self, neqn="", const="", node1="", lab1="", c1="", node2="",
lab2="", c2="", node3="", lab3="", c3="", **kwargs):
"""Defines a constraint equation relating degrees of freedom.
APDL Command: CE
Parameters
----------
neqn
Set equation reference number:
n - Arbitrary set number.
HIGH - The highest defined constraint equation number. This option is especially
useful when adding nodes to an existing set.
NEXT - The highest defined constraint equation number plus one. This option
automatically numbers coupled sets so that existing sets are
not modified.
const
Constant term of equation.
node1
Node for first term of equation. If -NODE1, this term is deleted
from the equation.
lab1
Degree of freedom label for first term of equation. Structural
labels: UX, UY, or UZ (displacements); ROTX, ROTY, or ROTZ
(rotations, in radians). Thermal labels: TEMP, TBOT, TE2, TE3, . .
., TTOP (temperature). Electric labels: VOLT (voltage). Magnetic
labels: MAG (scalar magnetic potential); AX, AY, or AZ (vector
magnetic potentials). Diffusion label: CONC (concentration).
c1
Coefficient for first node term of equation. If zero, this term is
ignored.
node2, lab2, c2
Node, label, and coefficient for second term.
node3, lab3, c3
Node, label, and coefficient for third term.
Notes
-----
Repeat the CE command to add additional terms to the same equation. To
change only the constant term, repeat the command with no node terms
specified. Only the constant term can be changed during solution, and
only with the CECMOD command.
Linear constraint equations may be used to relate the degrees of
freedom of selected nodes in a more general manner than described for
nodal coupling [CP]. The constraint equation is of the form:
CONST = C(1)*U(1) + C(2)*U(2) + ... + C(N)*U(N)
where U(I) is the degree of freedom (displacement, temperature, etc.)
of term (I) and C(I) is the coefficient of that term. The following
example is a set of two constraint equations, each containing three
terms:
0.0 = 3.0* (1 UX) + 3.0* (4 UX) + (-2.0)* (4 ROTY)
2.0 = 6.0* (2 UX) + 10.0* (4 UY) + 1.0* (3 UZ)
The first unique degree of freedom in the equation is eliminated in
terms of all other degrees of freedom in the equation. A unique degree
of freedom is one which is not specified in any other constraint
equation, coupled node set, specified displacement set, or master
degree of freedom set. It is recommended that the first term of the
equation be the degree of freedom to be eliminated. The first term of
the equation cannot contain a master degree of freedom, and no term can
contain coupled degrees of freedom. The same degree of freedom may be
specified in more than one equation but care must be taken to avoid
over-specification (over-constraint).
The degrees of freedom specified in the equation (i.e., UX, UY, ROTZ,
etc.) must also be included in the model (as determined from the
element types [ET]). Also, each node in the equation must be defined
on an element (any element type containing that degree of freedom will
do).
For buckling and modal analyses, the constant term of the equation will
not be taken into account (that is, CONST is always zero).
Note that under certain circumstances a constraint equation generated
by CE may be modified during the solution. See Program Modification of
Constraint Equations for more information.
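Examples
--------
A hedged sketch (the node numbers are arbitrary): enforce UX(1) =
UX(2) by writing the equation 0.0 = 1.0*UX(1) + (-1.0)*UX(2).
>>> mapdl.ce('NEXT', 0, 1, 'UX', 1, 2, 'UX', -1)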
"""
command = f"CE,{neqn},{const},{node1},{lab1},{c1},{node2},{lab2},{c2},{node3},{lab3},{c3}"
return self.run(command, **kwargs)
def cecyc(self, lowname="", highname="", nsector="", hindex="",
tolerance="", kmove="", kpairs="", **kwargs):
"""Generates the constraint equations for a cyclic symmetry analysis
APDL Command: CECYC
Parameters
----------
lowname
Name of a component for the nodes on the low angle edge of the
sector. Enclosed in single quotes.
highname
Name of a component for the nodes on the high angle edge of the
sector. Enclosed in single quotes.
nsector
Number of sectors in the complete 360 degrees.
hindex
Harmonic index to be represented by this set of constraint
equations. If Hindex is -1, generate constraint equations for
static cyclic symmetry. If HIndex is -2, generate constraint
equations for static cyclic asymmetry.
tolerance
A positive tolerance is an absolute tolerance (length units), and a
negative tolerance is a tolerance relative to the local element
size.
kmove
0 - Nodes are not moved (default).
1 - HIGHNAME component nodes are moved to match LOWNAME component nodes exactly.
kpairs
0 - Do not print paired nodes (default).
1 - Print table of paired nodes.
Notes
-----
The analysis can be either modal cyclic symmetry or static cyclic
symmetry.
The pair of nodes for which constraint equations are written are
rotated into CSYS,1.
"""
command = f"CECYC,{lowname},{highname},{nsector},{hindex},{tolerance},{kmove},{kpairs}"
return self.run(command, **kwargs)
def cedele(self, neqn1="", neqn2="", ninc="", nsel="", **kwargs):
"""Deletes constraint equations.
APDL Command: CEDELE
Parameters
----------
neqn1, neqn2, ninc
Delete constraint equations from NEQN1 to NEQN2 (defaults to NEQN1)
in steps of NINC (defaults to 1). If NEQN1 = ALL, NEQN2 and NINC
will be ignored and all constraint equations will be deleted.
nsel
Additional node selection control:
ANY - Delete equation set if any of the selected nodes are in the set (default).
ALL - Delete equation set only if all of the selected nodes are in the set.
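Examples
--------
A minimal sketch: delete all constraint equations.
>>> mapdl.cedele('ALL')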
"""
command = f"CEDELE,{neqn1},{neqn2},{ninc},{nsel}"
return self.run(command, **kwargs)
def ceintf(self, toler="", dof1="", dof2="", dof3="", dof4="", dof5="",
dof6="", movetol="", **kwargs):
"""Generates constraint equations at an interface.
APDL Command: CEINTF
Parameters
----------
toler
Tolerance about selected elements, based on a fraction of the
element dimension (defaults to 0.25 (25%)). Nodes outside the
element by more than the tolerance are not accepted as being on the
interface.
dof1, dof2, dof3, . . . , dof6
Degrees of freedom for which constraint equations are written.
Defaults to all applicable DOFs. DOF1 accepts ALL as a valid
label, in which case the rest are ignored (all DOFs are applied).
movetol
The allowed "motion" of a node (see Note below). This distance is
in terms of the element coordinates (-1.0 to 1.0). A typical value
is 0.05. Defaults to 0 (do not move). MoveTol must be less than
or equal to TOLER.
Notes
-----
This command can be used to "tie" together two regions with dissimilar
mesh patterns by generating constraint equations that connect the
selected nodes of one region to the selected elements of the other
region. At the interface between regions, nodes should be selected
from the more dense mesh region, A, and the elements selected from the
less dense mesh region, B. The degrees of freedom of region A nodes
are interpolated with the corresponding degrees of freedom of the nodes
on the region B elements, using the shape functions of the region B
elements. Constraint equations are then written that relate region A
and B nodes at the interface.
The MoveTol field lets the nodes in the previously mentioned region A
change coordinates when slightly inside or outside the elements of
region B. The change in coordinates causes the nodes of region A to
assume the same surface as the nodes associated with the elements of
region B. The constraint equations that relate the nodes at both
regions of the interface are then written.
Solid elements with six degrees of freedom should only be interfaced
with other six degree-of-freedom elements. The region A nodes should
be near the region B elements. A location tolerance based on the
smallest region B element length may be input. Stresses across the
interface are not necessarily continuous. Nodes in the interface
region should not have specified constraints.
Use the CPINTF command to connect nodes by coupling instead of
constraint equations. Use the EINTF command to connect nodes by line
elements. See also the NSEL and ESEL commands for selecting nodes and
elements. See the Mechanical APDL Theory Reference for a description
of 3-D space used to determine if a node will be considered by this
command.
As an alternative to the CEINTF command, you can use contact elements
and the internal multipoint constraint (MPC) algorithm to tie together
two regions having dissimilar meshes. See Solid-Solid and Shell-Shell
Assemblies for more information.
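Examples
--------
A hedged sketch (assumes the dense-region nodes and coarse-region
elements were already selected with NSEL and ESEL): tie the two
regions with the default tolerance for all applicable DOFs.
>>> mapdl.ceintf(0.25, 'ALL')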
"""
command = f"CEINTF,{toler},{dof1},{dof2},{dof3},{dof4},{dof5},{dof6},{movetol}"
return self.run(command, **kwargs)
def celist(self, neqn1="", neqn2="", ninc="", option="", **kwargs):
"""Lists the constraint equations.
APDL Command: CELIST
Parameters
----------
neqn1, neqn2, ninc
List constraint equations from NEQN1 to NEQN2 (defaults to NEQN1)
in steps of NINC (defaults to 1). If NEQN1 = ALL (default), NEQN2
and NINC are ignored and all constraint equations are listed.
option
Options for listing constraint equations:
ANY - List equation set if any of the selected nodes are in the set (default). Only
externally-generated constraint equations are listed.
ALL - List equation set only if all of the selected nodes are in the set. Only
externally-generated constraint equations are listed.
INTE - List internally-generated constraint equations that are associated with MPC-
based contact. Constraint equations are listed only if all
the nodes in the set are selected.
CONV - Convert internal constraint equations to external constraint equations.
Internal constraint equations are converted only if all of
the nodes in the set are selected.
Notes
-----
This command is valid in any processor. However, the INTE and CONV
options are only valid in the Solution processor after a SOLVE command
has been issued.
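Examples
--------
A minimal sketch: list all constraint equations.
>>> mapdl.celist('ALL')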
"""
command = f"CELIST,{neqn1},{neqn2},{ninc},{option}"
return self.run(command, **kwargs)
def cerig(self, maste="", slave="", ldof="", ldof2="", ldof3="", ldof4="",
ldof5="", **kwargs):
"""Defines a rigid region.
APDL Command: CERIG
Parameters
----------
maste
Retained (or master) node for this rigid region. If MASTE = P,
then graphical picking of the master and slave nodes is enabled
(first node picked will be the master node, and subsequent nodes
picked will be slave nodes), and subsequent fields are ignored
(valid only in GUI).
slave
Removed (or slave) node for this rigid region. If ALL, slave nodes
are all selected nodes.
ldof
Degrees of freedom associated with equations:
ALL - All applicable degrees of freedom (default). If 3-D, generate 6 equations
based on UX, UY, UZ, ROTX, ROTY, ROTZ; if 2-D, generate 3
equations based on UX, UY, ROTZ.
UXYZ - Translational degrees of freedom. If 3-D, generate 3 equations based on the
slave nodes' UX, UY, and UZ DOFs and the master node's UX,
UY, UZ, ROTX, ROTY, and ROTZ DOFs; if 2-D, generate 2
equations based on the slave nodes' UX and UY DOFs and the
master node's UX, UY, and ROTZ DOFs. No equations are
generated for the rotational coupling.
RXYZ - Rotational degrees of freedom. If 3-D, generate 3 equations based on ROTX,
ROTY, ROTZ; if 2-D, generate 1 equation based on ROTZ. No
equations are generated for the translational coupling.
UX - Slave translational UX degree of freedom only.
UY - Slave translational UY degree of freedom only.
UZ - Slave translational UZ degree of freedom only.
ROTX - Slave rotational ROTX degree of freedom only.
ROTY - Slave rotational ROTY degree of freedom only.
ROTZ - Slave rotational ROTZ degree of freedom only.
ldof2, ldof3, ldof4, ldof5
Additional degrees of freedom. Used only if more than one degree
of freedom required and Ldof is not ALL, UXYZ, or RXYZ.
Notes
-----
Defines a rigid region (link, area or volume) by automatically
generating constraint equations to relate nodes in the region. Nodes
in the rigid region must be assigned a geometric location before this
command is used. Also, nodes must be connected to elements having the
required degree of freedom set (see Ldof above). Generated constraint
equations are based on small deflection theory. Generated constraint
equations are numbered beginning from the highest previously defined
equation number (NEQN) plus 1. Equations, once generated, may be
listed [CELIST] or modified [CE] as desired. Repeat CERIG command for
additional rigid region equations.
This command will generate the constraint equations needed for defining
rigid lines in 2-D or 3-D space. Multiple rigid lines relative to a
common point are used to define a rigid area or a rigid volume. In 2-D
space, with Ldof = ALL, three equations are generated for each pair of
constrained nodes. These equations define the three rigid body motions
in global Cartesian space, i.e., two in-plane translations and one in-
plane rotation. These equations assume the X-Y plane to be the active
plane with UX, UY, and ROTZ degrees of freedom available at each node.
Other types of equations can be generated with the appropriate Ldof
labels.
Six equations are generated for each pair of constrained nodes in 3-D
space (with Ldof = ALL). These equations define the six rigid body
motions in global Cartesian space. These equations assume that UX, UY,
UZ, ROTX, ROTY, and ROTZ degrees of freedom are available at each node.
The UXYZ label allows generating a partial set of rigid region
equations. This option is useful for transmitting the bending moment
between elements having different degrees of freedom at a node. With
this option only two of the three equations are generated for each pair
of constrained nodes in 2-D space. In 3-D space, only three of the six
equations are generated. In each case the rotational coupling
equations are not generated. Similarly, the RXYZ label allows
generating a partial set of equations with the translational coupling
equations omitted.
Applying this command to a large number of slave nodes may result in
constraint equations with a large number of coefficients. This may
significantly increase the peak memory required during the process of
element assembly. If real memory or virtual memory is not available,
consider reducing the number of slave nodes.
Note that under certain circumstances the constraint equations
generated by CERIG may be modified during the solution. See Program
Modification of Constraint Equations for more information.
As an alternative to the CERIG command, you can define a similar type
of rigid region using contact elements and the internal multipoint
constraint (MPC) algorithm. See Surface-Based Constraints for more
information.
CERIG cannot be deleted using CEDELE,ALL and then regenerated in the
second or higher load steps if the LSWRITE and LSSOLVE procedure is
used. CERIG writes constraint equations directly into load step files.
Deleting constraint equations (CEDELE,ALL) cannot always maintain the
consistency among load steps.
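Examples
--------
A hedged sketch (the master node number is arbitrary): make node 10
the master of a rigid region formed by all currently selected nodes.
>>> mapdl.cerig(10, 'ALL', 'ALL')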
"""
command = f"CERIG,{maste},{slave},{ldof},{ldof2},{ldof3},{ldof4},{ldof5}"
return self.run(command, **kwargs)
def cesgen(self, itime="", inc="", nset1="", nset2="", ninc="", **kwargs):
"""Generates a set of constraint equations from existing sets.
APDL Command: CESGEN
Parameters
----------
itime, inc
Do this generation operation a total of ITIMEs, incrementing all
nodes in the existing sets by INC each time after the first. ITIME
must be >1 for generation to occur.
nset1, nset2, ninc
Generate sets from sets beginning with NSET1 to NSET2 (defaults to
NSET1) in steps of NINC (defaults to 1). If NSET1 is negative,
NSET2 and NINC are ignored and the last |NSET1| sets (in sequence
from maximum set number) are used as the sets to be repeated.
Notes
-----
Generates additional sets of constraint equations (with same labels)
from existing sets. Node numbers between sets may be uniformly
incremented.
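Examples
--------
A hedged sketch (the node-number increment is arbitrary): generate one
additional copy of constraint equation set 1 with all node numbers
incremented by 100.
>>> mapdl.cesgen(2, 100, 1)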
"""
command = f"CESGEN,{itime},{inc},{nset1},{nset2},{ninc}"
return self.run(command, **kwargs)
def rbe3(self, master="", dof="", slaves="", wtfact="", **kwargs):
"""Distributes the force/moment applied at the master node to a set of
APDL Command: RBE3
slave nodes, taking into account the geometry of the slave nodes as
well as weighting factors.
Parameters
----------
master
Node at which the force/moment to be distributed will be applied.
This node must be associated with an element for the master node to
be included in the DOF solution.
dof
Refers to the master node degrees of freedom to be used in
constraint equations. Valid labels are: UX, UY, UZ, ROTX, ROTY,
ROTZ, UXYZ, RXYZ, ALL
slaves
The name of an array parameter that contains a list of slave nodes.
Must specify the starting index number. ALL can be used for
currently selected set of nodes. The slave nodes may not be
colinear; that is, they must not all be located on the same straight line
(see Notes below).
wtfact
The name of an array parameter that contains a list of weighting
factors corresponding to each slave node above. Must have the
starting index number. If not specified, the weighting factor for
each slave node defaults to 1.
Notes
-----
The force is distributed to the slave nodes proportional to the
weighting factors. The moment is distributed as forces to the slaves;
these forces are proportional to the distance from the center of
gravity of the slave nodes times the weighting factors. Only the
translational degrees of freedom of the slave nodes are used for
constructing the constraint equations. Constraint equations are
converted to distributed forces/moments on the slave nodes during
solution.
RBE3 creates constraint equations such that the motion of the master is
the average of the slaves. For the rotations, a least-squares approach
is used to define the "average rotation" at the master from the
translations of the slaves. If the slave nodes are colinear, then one
of the master rotations that is parallel to the colinear direction
cannot be determined in terms of the translations of the slave nodes.
Therefore, the associated moment component on the master node in that
direction cannot be transmitted. When this case occurs, a warning
message is issued and the constraint equations created by RBE3 are
ignored.
Applying this command to a large number of slave nodes may result in
constraint equations with a large number of coefficients. This may
significantly increase the peak memory required during the process of
element assembly. If real memory or virtual memory is not available,
consider reducing the number of slave nodes.
As an alternative to the RBE3 command, you can apply a similar type of
constraint using contact elements and the internal multipoint
constraint (MPC) algorithm. See Surface-based Constraints for more
information.
This command is also valid in SOLUTION.
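Examples
--------
A hedged sketch ('SLV' is a hypothetical array parameter holding the
slave node list, created beforehand with *DIM and filled by the user;
the master node number is arbitrary):
>>> mapdl.rbe3(10, 'ALL', 'SLV(1)')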
"""
command = f"RBE3,{master},{dof},{slaves},{wtfact}"
return self.run(command, **kwargs)
```
#### File: _commands/preproc/database.py
```python
class Database:
def aflist(self, **kwargs):
"""Lists the current data in the database.
APDL Command: AFLIST
Notes
-----
Lists the current data and specifications in the database. If batch,
lists all appropriate data. If interactive, lists only summaries.
"""
command = "AFLIST,"
return self.run(command, **kwargs)
def cdread(self, option="", fname="", ext="", fnamei="", exti="", **kwargs):
"""Reads a file of solid model and database information into the database.
APDL Command: CDREAD
Parameters
----------
option
Selects which data to read:
ALL - Read all geometry, material property, load, and
component data (default). Solid model geometry and loads
will be read from the file Fnamei.Exti. All other data
will be read from the file Fname.Ext.
DB - Read all database information contained in file
Fname.Ext. This file should contain all information
mentioned above except the solid model loads. If reading a
.CDB file written with the GEOM option of the CDWRITE
command, element types [ET] compatible with the
connectivity of the elements on the file must be defined
prior to reading.
SOLID - Read the solid model geometry and solid model
loads from the file Fnamei.Exti. This file could have
been written by the CDWRITE or IGESOUT command.
COMB - Read the combined solid model and database
information from the file Fname.Ext.
fname
File name and directory path (248 characters maximum, including the
characters needed for the directory path). An unspecified
directory path defaults to the working directory; in this case, you
can use all 248 characters for the file name.
ext
Filename extension (eight-character maximum).
fnamei
Name of the IGES file and its directory path (248 characters
maximum, including directory). If you do not specify a directory
path, it will default to your working directory and you can use all
248 characters for the file name.
exti
Filename extension (eight-character maximum).
Notes
-----
This command causes coded files of solid model (in IGES format) and
database (in command format) information to be read. These files are
normally written by the CDWRITE or IGESOUT command. Note that the
active coordinate system in these files has been reset to Cartesian
(CSYS,0).
If a set of data exists prior to the CDREAD operation, that data set is
offset upward to allow the new data to fit without overlap. The
NOOFFSET command allows this offset to be ignored on a set-by-set
basis, causing the existing data set to be overwritten with the new
data set.
When you write the geometry data using the CDWRITE,GEOM option, you use
the CDREAD,DB option to read the geometry information.
Using the CDREAD,COMB option will not write NUMOFF commands to offset
entity ID numbers if there is no solid model in the database.
Multiple CDB file imports cannot have elements with real constants in
one file and section definitions in another. The section attributes
will override the real constant attributes. If you use CDREAD to
import multiple CDB files, define all of the elements using only real
constants, or using only section definitions. Combining real constants
and section definitions is not recommended.
This command is valid in any processor.
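Examples
--------
A minimal sketch (assuming 'mesh.cdb' exists in the working
directory): read the database information back in.
>>> mapdl.cdread('DB', 'mesh', 'cdb')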
"""
return self.run(f"CDREAD,{option},{fname},{ext},,{fnamei},{exti}", **kwargs)
def cdwrite(self, option="", fname="", ext="", fnamei="", exti="",
fmat="", **kwargs):
"""Writes geometry and load database items to a file.
APDL Command: CDWRITE
Parameters
----------
option
Selects which data to write:
ALL - Write all appropriate geometry, material property,
load, and component data (default). Two files will
be produced. Fname.Ext will contain all data items
mentioned in "Notes", except the solid model
data. Fnamei.Exti will contain the solid model
geometry and solid model loads data in the form of
IGES commands. This option is not valid when
CDOPT,ANF is active.
COMB - Write all data mentioned, but to a single file,
Fname.Ext. Solid model geometry data will be
written in either IGES or ANF format as specified
in the CDOPT command, followed by the remainder of
the data in the form of ANSYS commands. More
information on these (IGES/ANF) file formats is
provided in "Notes".
DB - Write all database information except the solid model
and solid model loads to Fname.Ext in the form of
ANSYS commands. This option is not valid when
CDOPT,ANF is active.
SOLID - Write only the solid model geometry and solid
model load data. This output will be in IGES or
ANF format, as specified in the CDOPT
command. More information on these (IGES/ANF) file
formats is provided in "Notes".
GEOM - Write only element and nodal geometry data. Neither
solid model geometry nor element attribute data
will be written. One file, Fname.Ext, will be
produced. Use CDREAD,DB to read in a file written
with this option. Element types [ET] compatible
with the connectivity of the elements on the file
must first be defined before reading the file in
with CDREAD,DB.
CM - Write only node and element component and geometry
data to Fname.Ext.
MAT - Write only material property data (both linear and
nonlinear) to Fname.Ext.
LOAD - Write only loads for current load step to
Fname.Ext.
SECT - Write only section data to Fname.Ext. Pretension
sections are not included.
fname
File name and directory path (248 characters maximum,
including the characters needed for the directory path).
An unspecified directory path defaults to the working
directory; in this case, you can use all 248 characters
for the file name.
ext
Filename extension (eight-character maximum).
fnamei
Name of the IGES file and its directory path (248
characters maximum, including directory). If you do not
specify a directory path, it will default to your working
directory and you can use all 248 characters for the file
name.
exti
Filename extension (eight-character maximum).
fmat
Format of the output file (defaults to BLOCKED).
BLOCKED - Blocked format. This format allows faster
reading of the output file. The time savings is
most significant when BLOCKED is used to read
.cdb files associated with very large models.
UNBLOCKED - Unblocked format.
Returns
-------
str
Mapdl command output.
Examples
--------
Create a basic block and save it to disk.
>>> mapdl.prep7()
>>> mapdl.block(0, 1, 0, 1, 0, 1)
>>> mapdl.et(1, 186)
>>> mapdl.esize(0.25)
>>> mapdl.vmesh('ALL')
>>> mapdl.cdwrite('DB', '/tmp/mesh.cdb')
TITLE =
NUMBER OF ELEMENT TYPES = 1
64 ELEMENTS CURRENTLY SELECTED. MAX ELEMENT NUMBER = 64
425 NODES CURRENTLY SELECTED. MAX NODE NUMBER = 425
8 KEYPOINTS CURRENTLY SELECTED. MAX KEYPOINT NUMBER = 8
12 LINES CURRENTLY SELECTED. MAX LINE NUMBER = 12
6 AREAS CURRENTLY SELECTED. MAX AREA NUMBER = 6
1 VOLUMES CURRENTLY SELECTED. MAX VOL. NUMBER = 1
WRITE ANSYS DATABASE AS AN ANSYS INPUT FILE: /tmp/mesh.cdb
Optionally load the mesh into Python using the archive reader.
>>> from ansys.mapdl import reader as pymapdl_reader
>>> mesh = pymapdl_reader.Archive('/tmp/mesh.cdb')
>>> mesh
ANSYS Archive File mesh.cdb
Number of Nodes: 425
Number of Elements: 64
Number of Element Types: 1
Number of Node Components: 0
Number of Element Components: 0
Notes
-----
Load data includes the current load step only. Loads applied
to the solid model (if any) are automatically transferred to
the finite element model when this command is issued. CDWRITE
writes out solid model loads for meshed models only. If the
model is not meshed, the solid model loads cannot be
saved. Component data include component definitions, but not
assembly definitions. Appropriate NUMOFF commands are included
at the beginning of the file; this is to avoid overlap of an
existing database when the file is read in.
Element order information (resulting from a WAVES command) is
not written. The data in the database remain untouched.
Solution control commands are typically not written to the
file unless you specifically change a default solution
setting.
CDWRITE does not support the GSBDATA and GSGDATA commands, and
these commands are not written to the file.
The data may be reread (on a different machine, for example)
with the CDREAD command. Caution: When the file is read in,
the NUMOFF,MAT command may cause a mismatch between material
definitions and material numbers referenced by certain loads
and element real constants. See NUMOFF for details. Also, be
aware that the files created by the CDWRITE command explicitly
set the active coordinate system to Cartesian (CSYS,0).
You should generally use the blocked format (Fmat = BLOCKED)
when writing out model data with CDWRITE. This is a compressed
data format that greatly reduces the time required to read
large models through the CDREAD command. The blocked and
unblocked formats are described in Chapter 3 of the Guide to
Interfacing with ANSYS.
If you use CDWRITE in any of the derived products (ANSYS Emag,
ANSYS Professional), then before reading the file, you must
edit the Jobname.cdb file to remove commands that are not
available in the respective component product.
The CDWRITE command writes PART information for any ANSYS
LS-DYNA input file to the Jobname.cdb file via the EDPREAD
command. (EDPREAD is not a documented command; it is written
only when the CDWRITE command is issued.) The PART information
can be automatically read in via the CDREAD command; however,
if more than one Jobname.cdb file is read, the PART list from
the last Jobname.cdb file overwrites the existing PART list of
the total model. This behavior affects all PART-related
commands contained in the Jobname.cdb file. You can join
models, but not PART-related inputs, which you must modify
using the newly-created PART numbers. In limited cases, an
update of the PART list (EDWRITE,PUPDATE) is possible; doing
so requires that no used combination of MAT/TYPE/REAL appears
more than once in the list.
The CDWRITE command does not support (for beam meshing) any
line operation that relies on solid model associativity. For
example, meshing the areas adjacent to the meshed line,
plotting the line that contains the orientation nodes, or
clearing the mesh from the line that contains orientation
nodes may not work as expected. For more information about
beam meshing, see Meshing Your Solid Model in the Modeling and
Meshing Guide.
IGES and ANF File Formats for Solid Model Geometry Information
The format used for solid model geometry information is
determined by the current CDOPT command setting. The default
format is IGES.
IGES option (default) to write solid model information (CDOPT,
IGS):
Before writing solid model entities, select all corresponding
lower level entities (ALLSEL,BELOW,ALL).
"""
command = f"CDWRITE,{option},'{fname}',{ext},,{fnamei},{exti},{fmat}"
return self.run(command, **kwargs)
def cdopt(self, option="", **kwargs):
"""Specifies format to be used for archiving geometry.
APDL Command: CDOPT
Parameters
----------
option
IGES
IGES - Write solid model geometry information using IGES format (default).
ANF - Write solid model geometry information using ANSYS Neutral File format.
STAT - Print out the current format setting.
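Examples
--------
Archive solid model geometry in ANF format when writing a combined
archive (a minimal sketch assuming an active ``mapdl`` session; the
file name is hypothetical):
>>> mapdl.cdopt('ANF')
>>> mapdl.cdwrite('COMB', '/tmp/model', 'cdb')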
Notes
-----
This command controls your solid model geometry format for CDWRITE
operations. The ANF option affects only the COMB and SOLID options of
the CDWRITE command. All other options remain unaffected.
This option setting is saved in the database.
"""
command = "CDOPT,%s" % (str(option))
return self.run(command, **kwargs)
def cecheck(self, itemlab="", tolerance="", dof="", **kwargs):
"""Check constraint equations and couplings for rigid body motions.
APDL Command: CECHECK
Parameters
----------
itemlab
Item indicating what is to be checked:
CE - Check constraint equations only
CP - Check couplings only
ALL - Check both CE and CP
tolerance
Allowed amount of out-of-balance for any constraint equation or
coupled set. The default value of 1.0e-6 is usually good.
dof
Specifies which DOF is to be checked. Default is RIGID, the usual
option. Other choices are individual DOF such as UX, ROTZ, etc. or
THERM. The THERM option will check the constraint equations or
coupled sets for free thermal expansions, whereas the individual
DOFs check under rigid body motions. ALL is RIGID and THERM.
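Examples
--------
Check all constraint equations and couplings for rigid body motion
(a minimal sketch assuming constraint equations or coupled sets
have already been defined):
>>> mapdl.cecheck('ALL')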
Notes
-----
This command imposes a rigid body motion on the nodes attached to the
constraint equation or coupled set and makes sure that no internal
forces are generated for such rigid body motions. Generation of
internal forces by rigid body motions usually indicates an error in the
equation specification (possibly due to nodal coordinate rotations).
The THERM option does a similar check to see that no internal forces
are created by the equations if the body does a free thermal expansion
(this check assumes a single isotropic coefficient of expansion).
"""
command = "CECHECK,%s,%s,%s" % (str(itemlab), str(tolerance), str(dof))
return self.run(command, **kwargs)
def check(self, sele="", levl="", **kwargs):
"""Checks current database items for completeness.
APDL Command: CHECK
Parameters
----------
sele
Specifies which elements are to be checked:
(blank) - Check all data.
ESEL - Check only elements in the selected set and unselect any elements not producing
geometry check messages. The remaining elements (those
producing check messages) can then be displayed and
corrected. A null set results if no elements produce a
message. Issue ESEL,ALL to select all elements before
proceeding.
levl
Used only with Sele = ESEL:
WARN - Select elements producing warning and error messages.
ERR - Select only elements producing error messages (default).
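Examples
--------
Check all database items for completeness after meshing (a minimal
sketch assuming a meshed model in an active ``mapdl`` session):
>>> mapdl.check()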
Notes
-----
This command will not work if SHPP,OFF has been set. A similar,
automatic check of all data is done before the solution begins.
If the "Check Elements" option is invoked through the GUI (menu path
Main Menu> Preprocessor> Meshing> Check Elems), the CHECK,ESEL logic is
used to highlight elements in the following way: good elements are
blue, elements having warnings are yellow, and bad (error) elements are
red.
Note: The currently selected set of elements is not changed by this
GUI function.
This command is also valid in PREP7.
"""
command = "CHECK,%s,%s" % (str(sele), str(levl))
return self.run(command, **kwargs)
def cncheck(self, option="", rid1="", rid2="", rinc="", intertype="",
trlevel="", cgap="", cpen="", ioff="", **kwargs):
"""Provides and/or adjusts the initial status of contact pairs.
APDL Command: CNCHECK
Parameters
----------
option
Option to be performed:
DETAIL - List all contact pair properties (default).
SUMMARY - List only the open/closed status for each contact pair.
POST - Execute a partial solution to write the initial
contact configuration to the Jobname.RCN file.
ADJUST - Physically move contact nodes to the target in
order to close a gap or reduce penetration. The
initial adjustment is converted to structural
displacement values (UX, UY, UZ) and stored in
the Jobname.RCN file.
RESET - Reset target element and contact element key
options and real constants to their default
values. This option is not valid for general
contact.
AUTO - Automatically sets certain real constants and key
options to recommended values or settings in order
to achieve better convergence based on overall
contact pair behaviors. This option is not valid
for general contact.
TRIM - Trim contact pair (remove certain contact and target elements).
UNSE - Unselect certain contact and target elements.
rid1, rid2, rinc
For pair-based contact, the range of real constant pair
ID's for which Option will be performed. If RID2 is not
specified, it defaults to RID1. If no value is specified,
all contact pairs in the selected set of elements are
considered.
intertype
The type of contact interface (pair-based versus general
contact) to be considered; or the type of contact pair to
be trimmed/unselected/auto-set.
(blank) - Include all contact definitions (pair-based and general contact).
GCN - Include general contact definitions only (not valid
when Option = RESET or AUTO).
trlevel
Trimming level (used only when Option = TRIM or UNSE):
(blank) - Normal trimming (default): remove/unselect
contact and target elements which are in
far-field.
AGGRE - Aggressive trimming: remove/unselect contact and
target elements which are in far-field, and
certain elements in near-field.
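Examples
--------
List the open/closed status of every defined contact pair (a
minimal sketch assuming contact pairs exist in the model):
>>> mapdl.cncheck('SUMMARY')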
Notes
-----
The CNCHECK command provides information for
surface-to-surface, node-to-surface, and line-to-line contact
pairs (element types TARGE169, TARGE170, CONTA171, CONTA172,
CONTA173, CONTA174, CONTA175, CONTA176, CONTA177). All contact
and target elements of interest, along with the solid elements
and nodes attached to them, must be selected for the command
to function properly. For performance reasons, the program
uses a subset of nodes and elements based on the specified
contact regions (RID1, RID2, RINC) when executing the CNCHECK
command.
CNCHECK is available in both the PREP7 and SOLUTION
processors, but only before the first solve operation (that
is, only before the first load step or the first substep).
If the contact and target elements were generated through mesh
commands (AMESH, LMESH, etc.) instead of the ESURF command,
you must issue MODMSH,DETACH before CNCHECK. Otherwise,
CNCHECK will not work correctly.
The following additional notes are available:
The command CNCHECK,POST solves the initial contact configuration in
one substep. After issuing this command, you can postprocess the
contact result items as you would for any other converged load step;
however, only the contact status, contact penetration or gap, and
contact pressure will have meaningful values. Other contact quantities
(friction stress, sliding distance, chattering) will be available but
are not useful.
Because Option = POST forces a solve operation, the PrepPost (PP)
license does not work with CNCHECK,POST.
"""
command = "CNCHECK,%s,%s,%s,%s,%s,%s,%s,%s,%s" % (str(option), str(rid1), str(
rid2), str(rinc), str(intertype), str(trlevel), str(cgap), str(cpen), str(ioff))
return self.run(command, **kwargs)
def igesout(self, fname="", ext="", att="", **kwargs):
"""Writes solid model data to a file in IGES Version 5.1 format.
APDL Command: IGESOUT
Parameters
----------
fname
File name and directory path (248 characters maximum, including the
characters needed for the directory path). An unspecified
directory path defaults to the working directory; in this case, you
can use all 248 characters for the file name.
ext
Filename extension (eight-character maximum).
att
Attribute key:
0 - Do not write assigned numbers and attributes of the
solid model entities to the IGES file (default).
1 - Write assigned numbers and attributes of solid model
entities (keypoints, lines, areas, volumes) to the
IGES file. Attributes include MAT, TYPE, REAL, and
ESYS specifications as well as associated solid model
loads and meshing (keypoint element size, number of
line divisions and spacing ratio) specifications.
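Examples
--------
Write the selected solid model, including entity attributes, to an
IGES file (a minimal sketch; the file name is hypothetical):
>>> mapdl.allsel('BELOW', 'ALL')
>>> mapdl.igesout('model', 'iges', att=1)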
Notes
-----
Causes the selected solid model data to be written to a coded
file in the IGES Version 5.1 format. Previous data on this
file, if any, are overwritten. Keypoints that are not
attached to any line are written to the output file as IGES
entity 116 (Point). Lines that are not attached to any area
are written to the output file as either IGES Entity 100
(Circular Arc), 110 (Line), or 126 (Rational B-Spline Curve)
depending upon whether the ANSYS entity was defined as an arc,
straight line, or spline. Areas are written to the output
file as IGES Entity 144 (Trimmed Parametric Surface). Volumes
are written to the output file as IGES entity 186 (Manifold
Solid B-Rep Object). Solid model entities to be written must
have all corresponding lower level entities selected (use
ALLSEL,BELOW,ALL) before issuing the command. Concatenated lines
and areas are not written to the IGES file; however, the
entities that make up these concatenated entities are written.
Caution: Section properties assigned to areas, lines and
other solid model entities will not be maintained when the
model is exported using IGESOUT.
If you issue the IGESOUT command after generating a beam mesh
with orientation nodes, the orientation keypoints that were
specified for the line (LATT) are no longer associated with
the line and are not written out to the IGES file. The line
does not recognize that orientation keypoints were ever
assigned to it, and the orientation keypoints do not "know"
that they are orientation keypoints. Thus the IGESOUT command
does not support (for beam meshing) any line operation that
relies on solid model associativity. For example, meshing the
areas adjacent to the meshed line, plotting the line that
contains the orientation nodes, or clearing the mesh from the
line that contains orientation nodes may not work as expected.
See Meshing Your Solid Model in the Modeling and Meshing Guide
for more information about beam meshing.
"""
return self.run(f"IGESOUT,{fname},{ext},,{att}", **kwargs)
def mfimport(self, fnumb="", option="", fname="", ext="", **kwargs):
"""Imports a new field into a current ANSYS Multi-field solver analysis.
APDL Command: MFIMPORT
Parameters
----------
fnumb
Field number specified by the MFELEM command.
option
Selects data to read.
DB - Reads a CDB file. The CDB file name and extension are specified by Fname and
Ext.
fname
File name and directory path (248 characters maximum, including the
characters needed for the directory path). An unspecified
directory path defaults to the working directory; in this case, you
can use all 248 characters for the file name.
ext
Filename extension (eight-character maximum).
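Examples
--------
Import a new field (number 2) from a previously written archive (a
minimal sketch; ``field2.cdb`` is a hypothetical file):
>>> mapdl.mfimport(2, 'DB', 'field2', 'cdb')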
Notes
-----
The field to be imported should be written to a CDB file (CDWRITE
command). This file is read into the database, offsetting all existing
element type numbers, node numbers, etc. in order to accommodate the
imported field. (See the NUMOFF command for information on offset
capabilities.) It then updates all of the previously issued MFxx
commands to the new element type numbers. A new field is created using
the specified field number, which must not currently exist. If there
are no ANSYS Multi-field solver command files written for the existing
fields in the database, one will be written for each field with the
default name (see the MFCMMAND command). A MFCMMAND will be issued for
the imported field as well.
Repeat the MFIMPORT command to import additional fields.
See Multi-field Commands in the Coupled-Field Analysis Guide for a list
of all ANSYS Multi-field solver commands and their availability for MFS
and MFX analyses.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = "MFIMPORT,%s,%s,%s,%s" % (
str(fnumb), str(option), str(fname), str(ext))
return self.run(command, **kwargs)
def nooffset(self, label="", **kwargs):
"""Prevents the CDREAD command from offsetting specified data items
APDL Command: NOOFFSET
Parameters
----------
label
Specifies items not to be offset.
NODE - Node numbers
ELEM - Element numbers
KP - Keypoint numbers
LINE - Line numbers
AREA - Area numbers
VOLU - Volume numbers
MAT - Material numbers
TYPE - Element type numbers
REAL - Real constant numbers
CSYS - Coordinate system numbers
SECN - Section numbers
CP - Coupled set numbers
CE - Constraint equation numbers
CLEAR - All items will be offset
STATUS - Shows which items are specified not to be offset.
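Examples
--------
Keep material numbers unchanged when reading an archive (a minimal
sketch; the file name is hypothetical):
>>> mapdl.nooffset('MAT')
>>> mapdl.cdread('DB', 'model', 'cdb')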
Notes
-----
The NOOFFSET command specifies data items not to be offset by a set of
data read from a CDREAD command.
"""
command = "NOOFFSET,%s" % (str(label))
return self.run(command, **kwargs)
def numcmp(self, label="", **kwargs):
"""Compresses the numbering of defined items.
APDL Command: NUMCMP
Parameters
----------
label
Items to be compressed:
NODE - Node numbers
ELEM - Element numbers
KP - Keypoint numbers
LINE - Line numbers
AREA - Area numbers
VOLU - Volume numbers
MAT - Material numbers
TYPE - Element type numbers
REAL - Real constant numbers
CP - Coupled set numbers
SECN - Section numbers
CE - Constraint equation numbers
ALL - All item numbers
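Examples
--------
Compress all item numbering after deleting entities (a minimal
sketch assuming an active ``mapdl`` session):
>>> mapdl.numcmp('ALL')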
Notes
-----
The NUMCMP command effectively compresses out unused item
numbers by renumbering all the items, beginning with one and
continuing throughout the model. The renumbering order
follows the initial item numbering order (that is, compression
lowers the maximum number by "sliding" numbers down to take
advantage of unused or skipped numbers). All defined items
are renumbered, regardless of whether or not they are actually
used or selected. Applicable related items are also checked
for renumbering as described for the merge operation (NUMMRG).
Compressing material numbers (NUMCMP,ALL or NUMCMP,MAT) does
not update the material number referenced by either of the
following:
A temperature-dependent convection or surface-to-surface
radiation load (SF, SFE, SFL, SFA)
Real constants for multi-material elements (such as SOLID65)
Compression is usually not required unless memory space is limited and
there are large gaps in the numbering sequence.
"""
command = "NUMCMP,%s" % (str(label))
return self.run(command, **kwargs)
def nummrg(self, label="", toler="", gtoler="", action="", switch="",
**kwargs):
"""Merges coincident or equivalently defined items.
APDL Command: NUMMRG
Parameters
----------
label
Items to be merged:
NODE - Nodes
ELEM - Elements
KP - Keypoints (will also merge lines, areas, and volumes)
MAT - Materials
TYPE - Element types
REAL - Real constants
CP - Coupled sets
CE - Constraint equations
ALL - All items
toler
Range of coincidence. For Label = NODE and KP, defaults to 1.0E-4
(based on maximum Cartesian coordinate difference between nodes or
keypoints). For Label = MAT, REAL, and CE, defaults to 1.0E-7
(based on difference of the values normalized by the values). Only
items within range are merged. (For keypoints attached to lines,
further restrictions apply. See the GTOLER field and Merging Solid
Model Entities below.)
gtoler
Global solid model tolerance -- used only when merging keypoints
attached to lines. If specified, GTOLER will override the internal
relative solid model tolerance. See Merging Solid Model Entities
below.
action
Specifies whether to merge or select coincident items.
SELE - Select coincident items but do not merge. Action = SELE is only valid for Label
= NODE.
(Blank) - Merge the coincident items (default).
switch
Specifies whether the lowest or highest numbered coincident item is
retained after the merging operation. This option does not apply
to keypoints; i.e., for Label = KP, the lowest numbered keypoint is
retained regardless of the Switch setting.
LOW - Retain the lowest numbered coincident item after the merging operation
(default).
HIGH - Retain the highest numbered coincident item after the merging operation.
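Examples
--------
Merge coincident nodes and then coincident keypoints with the
default tolerances (a minimal sketch; per the notes below, nodes
should be merged before keypoints in a meshed model):
>>> mapdl.nummrg('NODE')
>>> mapdl.nummrg('KP')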
Notes
-----
After issuing the command, the area and volume sizes (ASUM and VSUM)
may give slightly different results. In order to obtain the same
results as before, use /FACET, /NORMAL, and ASUM or VSUM.
The merge operation is useful for tying separate, but coincident, parts
of a model together. If not all items are to be checked for merging,
use the select commands (NSEL, ESEL, etc.) to select items. Only
selected items are included in the merge operation for nodes,
keypoints, and elements.
By default, the merge operation retains the lowest numbered coincident
item. Higher numbered coincident items are deleted. Set Switch to
HIGH to retain the highest numbered coincident item after the merging
operation. Applicable related items are also checked for deleted item
numbers and if found, are replaced with the retained item number. For
example, if nodes are merged, element connectivities (except
superelements), mesh item range associativity, coupled degrees of
freedom, constraint equations, master degrees of freedom, gap
conditions, degree of freedom constraints, nodal force loads, nodal
surface loads, and nodal body force loads are checked. Merging
material numbers [NUMMRG,ALL or NUMMRG,MAT] does not update the
material number referenced:
By temperature-dependent film coefficients as part of convection load
or a temperature-dependent emissivity as part of a surface-to-surface
radiation load [SF, SFE, SFL, SFA]
By real constants for multi-material elements (such as SOLID65)
If a unique load is defined among merged nodes, the value is kept and
applied to the retained node. If loads are not unique (not
recommended), only the value on the lowest node (or highest if Switch =
HIGH) will be kept, except for "force" loads for which the values will
be summed if they are not defined using tabular boundary conditions.
Note: The unused nodes (not recommended) in elements, couplings,
constraint equations, etc. may become active after the merge operation.
The Action field provides the option of visualizing the coincident
items before the merging operation.
Caution: When merging entities in a model that has already been
meshed, the order in which you issue multiple NUMMRG commands is
significant. If you want to merge two adjacent meshed regions that
have coincident nodes and keypoints, always merge nodes [NUMMRG,NODE]
before merging keypoints [NUMMRG,KP]. Merging keypoints before nodes
can result in some of the nodes becoming "orphaned"; that is, the nodes
lose their association with the solid model. Orphaned nodes can cause
certain operations (such as boundary condition transfers, surface load
transfers, and so on) to fail. However, using NUMMRG should be avoided
if at all possible, as the procedure outlined above may even cause
meshing failure, especially after multiple merging and meshing
operations.
After a NUMMRG,NODE, is issued, some nodes may be attached to more than
one solid entity. As a result, subsequent attempts to transfer solid
model loads to the elements may not be successful. Issue NUMMRG,KP to
correct this problem. Do NOT issue VCLEAR before issuing NUMMRG,KP.
For NUMMRG,ELEM, elements must be identical in all aspects, including
the direction of the element coordinate system.
For certain solid and shell elements (181, 185, 190, etc) ANSYS will
interpret coincident faces as internal and eliminate them. To prevent
this from occurring, shrink the entities by a very small factor to
delineate coincident items (/SHRINK, 0.0001) and no internal nodes,
lines, areas or elements will be eliminated.
When working with solid models, you may have better success with the
gluing operations (AGLUE, LGLUE, VGLUE). Please read the following
information when attempting to merge solid model entities.
Gluing Operations vs. Merging Operations
Adjacent, touching regions can be joined by gluing them (AGLUE, LGLUE,
VGLUE) or by merging coincident keypoints (NUMMRG,KP, which also causes
merging of identical lines, areas, and volumes). In many situations,
either approach will work just fine. Some factors, however, may lead to
a preference for one method over the other.
Geometric Configuration
Gluing is possible regardless of the initial alignment or offset of the
input entities. Keypoint merging is possible only if each keypoint on
one side of the face to be joined is matched by a coincident keypoint
on the other side. This is commonly the case after a symmetry
reflection (ARSYM or VSYMM) or a copy (AGEN or VGEN), especially for a
model built entirely in ANSYS rather than imported from a CAD system.
When the geometry is extremely precise, and the configuration is
correct for keypoint merging, NUMMRG is more efficient and robust than
AGLUE or VGLUE.
"""
command = "NUMMRG,%s,%s,%s,%s,%s" % (str(label), str(
toler), str(gtoler), str(action), str(switch))
return self.run(command, **kwargs)
def numoff(self, label="", value="", **kwargs):
"""Adds a number offset to defined items.
APDL Command: NUMOFF
Parameters
----------
label
Apply offset number to one of the following sets of items:
NODE - Nodes
ELEM - Elements
KP - Keypoints
LINE - Lines
AREA - Areas
VOLU - Volumes
MAT - Materials
TYPE - Element types
REAL - Real constants
CP - Coupled sets
SECN - Section numbers
CE - Constraint equations
CSYS - Coordinate systems
value
Offset number value (cannot be negative).
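Examples
--------
Offset node numbers by 1000 before reading in a second model (a
minimal sketch assuming an active ``mapdl`` session):
>>> mapdl.numoff('NODE', 1000)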
Notes
-----
Useful for offsetting current model data to prevent overlap if another
model is read in. CDWRITE automatically writes the appropriate NUMOFF
commands followed by the model data to File.CDB. Therefore, when the
file is read, any model already existing in the database is offset
before the model data on the file is read.
Offsetting material numbers with this command [NUMOFF,MAT] does not
update the material number referenced by either of the following:
A temperature-dependent convection or surface-to-surface radiation load
[SF, SFE, SFL, SFA]
Real constants for multi-material elements (such as SOLID65).
Therefore, a mismatch may exist between the material definitions and
the material numbers referenced.
"""
command = "NUMOFF,%s,%s" % (str(label), str(value))
return self.run(command, **kwargs)
def numstr(self, label="", value="", **kwargs):
"""Establishes starting numbers for automatically numbered items.
APDL Command: NUMSTR
Parameters
----------
label
Apply starting number to one of the following sets of items:
NODE - Node numbers. Value defaults (and is continually reset) to 1 + maximum node
number in model. Cannot be reset lower.
ELEM - Element numbers. Value defaults (and is continually reset) to 1 + maximum
element number in model. Cannot be reset lower.
KP - Keypoint numbers. Value defaults to 1. Only undefined numbers are used.
Existing keypoints are not overwritten.
LINE - Line numbers. Value defaults to 1. Only undefined numbers are used. Existing
lines are not overwritten.
AREA - Area numbers. Value defaults to 1. Only undefined numbers are used. Existing
areas are not overwritten.
VOLU - Volume numbers. Value defaults to 1. Only undefined numbers are used.
Existing volumes are not overwritten.
DEFA - Default. Returns all starting numbers to their default values.
value
Starting number value.
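Examples
--------
Start automatic node numbering at 1000 for subsequently created
nodes (a minimal sketch for a new model):
>>> mapdl.numstr('NODE', 1000)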
Notes
-----
Establishes starting numbers for various items that may have numbers
automatically assigned (such as element numbers with the EGEN command,
and node and solid model entity numbers with the mesh [AMESH, VMESH,
etc.] commands). Use NUMSTR,STAT to display settings. Use NUMSTR,DEFA
to reset all specifications back to defaults. Defaults may be lowered
by deleting and compressing items (i.e., NDELE and NUMCMP,NODE for
nodes, etc.).
Note: A mesh clear operation (VCLEAR, ACLEAR, LCLEAR, and KCLEAR)
automatically sets starting node and element numbers to the highest
unused numbers. If a specific starting node or element number is
desired, issue NUMSTR after the clear operation.
"""
command = "NUMSTR,%s,%s" % (str(label), str(value))
return self.run(command, **kwargs)
```
#### File: _commands/preproc/keypoints.py
```python
import re
from ansys.mapdl.core._commands import parse
class KeyPoints:
def k(self, npt="", x="", y="", z="", **kwargs) -> int:
"""Define a keypoint.
APDL Command: K
Parameters
----------
npt
Reference number for keypoint. If zero, the lowest
available number is assigned [NUMSTR].
x, y, z
Keypoint location in the active coordinate system (may be
R, θ, Z or R, θ, Φ).
Returns
-------
int
The keypoint number of the generated keypoint.
Examples
--------
Create keypoint at ``(0, 1, 2)``
>>> knum = mapdl.k('', 0, 1, 2)
>>> knum
1
Create keypoint at ``(10, 11, 12)`` while specifying the
keypoint number.
>>> knum = mapdl.k(5, 10, 11, 12)
>>> knum
5
Notes
-----
Defines a keypoint in the active coordinate system [CSYS] for
line, area, and volume descriptions. A previously defined
keypoint of the same number will be redefined. Keypoints may
be redefined only if it is not yet attached to a line or is
not yet meshed. Solid modeling in a toroidal system is not
recommended.
"""
command = f"K,{npt},{x},{y},{z}"
msg = self.run(command, **kwargs)
if msg:
if not re.search(r"KEYPOINT NUMBER", msg):
res = re.search(r"(KEYPOINT\s*)([0-9]+)", msg)
else:
res = re.search(r"(KEYPOINT NUMBER =\s*)([0-9]+)", msg)
if res:
return int(res.group(2))
def kbetw(self, kp1="", kp2="", kpnew="", type_="", value="", **kwargs) -> int:
"""Creates a keypoint between two existing keypoints.
APDL Command: KBETW
Parameters
----------
kp1
First keypoint.
kp2
Second keypoint.
kpnew
Number assigned to the new keypoint. Defaults to the
lowest available keypoint number.
type\_
Type of input for VALUE.
RATIO - Value is the ratio of the distances between keypoints as follows:
``(KP1-KPNEW)/(KP1-KP2)``.
DIST - Value is the absolute distance between KP1 and
KPNEW (valid only if current coordinate system is
Cartesian).
value
Location of new keypoint, as defined by Type (defaults to
0.5). If VALUE is a ratio (Type = RATIO) and is less than
0 or greater than 1, the keypoint is created on the
extended line. Similarly, if VALUE is a distance (Type =
DIST) and is less than 0 or greater than the distance
between KP1 and KP2, the keypoint is created on the
extended line.
Returns
-------
int
Keypoint number of the generated keypoint.
Examples
--------
Create a keypoint exactly centered between two keypoints.
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 1, 0, 0)
>>> k2 = mapdl.kbetw(k0, k1)
>>> k2
3
Notes
-----
Placement of the new keypoint depends on the currently active
coordinate system [CSYS]. If the coordinate system is
Cartesian, the keypoint will lie on a straight line between
KP1 and KP2. If the system is not Cartesian (e.g.,
cylindrical, spherical, etc.), the keypoint will be located as
if on a line (which may not be straight) created in the
current coordinate system between KP1 and KP2. Note that
solid modeling in a toroidal coordinate system is not
recommended.
"""
command = f"KBETW,{kp1},{kp2},{kpnew},{type_},{value}"
return parse.parse_kpoint(self.run(command, **kwargs))
def kcenter(self, type_="", val1="", val2="", val3="", val4="", kpnew="",
**kwargs) -> int:
"""Creates a keypoint at the center of a circular arc defined
by three locations.
APDL Command: KCENTER
Parameters
----------
type\_
Type of entity used to define the circular arc. The
meaning of VAL1 through VAL4 will vary depending on Type.
KP - Arc is defined by keypoints.
LINE - Arc is defined by locations on a line.
val1, val2, val3, val4
Values used to specify three locations on the arc; their meaning
depends on Type (see "Notes").
kpnew
Number assigned to new keypoint. Defaults to the lowest available
keypoint number.
Returns
-------
int
Keypoint number of the generated keypoint.
Examples
--------
Create a keypoint at the center of a circle centered at (0, 0, 0)
>>> k0 = mapdl.k("", 0, 1, 0)
>>> k1 = mapdl.k("", 1, 0, 0)
>>> k2 = mapdl.k("", 0, -1, 0)
>>> k3 = mapdl.kcenter('KP', k0, k1, k2)
>>> k3
4
Notes
-----
KCENTER should be used in the Cartesian coordinate system
(CSYS,0) only. This command provides three methods to define
a keypoint at the center of three locations.
Three keypoints (Type = KP): VAL1, VAL2, and VAL3 are keypoints on
an imaginary circular arc, and KPNEW is created at the center of
that arc.
"""
command = f"KCENTER,{type_},{val1},{val2},{val3},{val4},{kpnew}"
return parse.parse_kpoint(self.run(command, **kwargs))
def kdele(self, np1="", np2="", ninc="", **kwargs):
"""Deletes unmeshed keypoints.
APDL Command: KDELE
Parameters
----------
np1, np2, ninc
Delete keypoints from NP1 to NP2 (defaults to NP1) in steps of NINC
(defaults to 1). If NP1 = ALL, NP2 and NINC are ignored and all
selected keypoints [KSEL] are deleted. If NP1 = P, graphical
picking is enabled and all remaining command fields are ignored
(valid only in the GUI). A component name may also be substituted
for NP1 (NP2 and NINC are ignored).
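Examples
--------
Create a keypoint and then delete it (a minimal sketch):
>>> knum = mapdl.k('', 0, 0, 0)
>>> mapdl.kdele(knum)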
Notes
-----
Deletes selected keypoints. A keypoint attached to a line cannot be
deleted unless the line is first deleted.
"""
command = f"KDELE,{np1},{np2},{ninc}"
return self.run(command, **kwargs)
def kdist(self, kp1="", kp2="", **kwargs) -> list:
"""Calculates and lists the distance between two keypoints.
APDL Command: KDIST
Parameters
----------
kp1
First keypoint in distance calculation.
kp2
Second keypoint in distance calculation.
Returns
-------
list
``[X, Y, Z]`` distance between two keypoints.
Examples
--------
Compute the distance between two keypoints.
>>> kp0 = (0, 10, -3)
>>> kp1 = (1, 5, 10)
>>> knum0 = mapdl.k("", *kp0)
>>> knum1 = mapdl.k("", *kp1)
>>> dist = mapdl.kdist(knum0, knum1)
>>> dist
[1.0, -5.0, 13.0]
Notes
-----
KDIST lists the distance between keypoints KP1 and KP2, as
well as the current coordinate system offsets from KP1 to KP2,
where the X, Y, and Z locations of KP1 are subtracted from the
X, Y, and Z locations of KP2 (respectively) to determine the
offsets. KDIST is valid in any coordinate system except
toroidal [CSYS,3].
This command is valid in any processor.
"""
return parse.parse_kdist(self.run(f"KDIST,{kp1},{kp2}", **kwargs))
def kfill(self, np1="", np2="", nfill="", nstrt="", ninc="", space="",
**kwargs):
"""Generates keypoints between two keypoints.
APDL Command: KFILL
Parameters
----------
np1, np2
Beginning and ending keypoints for fill-in. NP1 defaults to next
to last keypoint specified, NP2 defaults to last keypoint
specified. If NP1 = P, graphical picking is enabled and all
remaining command fields are ignored (valid only in the GUI).
nfill
Fill NFILL keypoints between NP1 and NP2 (defaults to |NP2-NP1|-1).
NFILL must be positive.
nstrt
Keypoint number assigned to first filled-in keypoint (defaults to
NP1 + NINC).
ninc
Add this increment to each of the remaining filled-in keypoint
numbers (may be positive or negative). Defaults to
(NP2-NP1)/(NFILL + 1), i.e., linear interpolation.
space
Spacing ratio. Ratio of last division size to first division size.
If > 1.0, divisions increase. If < 1.0, divisions decrease. Ratio
defaults to 1.0 (uniform spacing).
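Examples
--------
Fill four evenly spaced keypoints between two endpoints (a minimal
sketch):
>>> k0 = mapdl.k('', 0, 0, 0)
>>> k1 = mapdl.k('', 5, 0, 0)
>>> mapdl.kfill(k0, k1, 4)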
Notes
-----
Generates keypoints (in the active coordinate system) between two
existing keypoints. The two keypoints may have been defined in any
coordinate system. However, solid modeling in a toroidal coordinate
system is not recommended. Any number of keypoints may be filled in
and any keypoint numbering sequence may be assigned.
"""
command = f"KFILL,{np1},{np2},{nfill},{nstrt},{ninc},{space}"
return self.run(command, **kwargs)
def kgen(self, itime="", np1="", np2="", ninc="", dx="", dy="", dz="",
kinc="", noelem="", imove="", **kwargs):
"""Generates additional keypoints from a pattern of keypoints.
APDL Command: KGEN
Parameters
----------
itime
Do this generation operation a total of ITIME times, incrementing
all keypoints in the given pattern automatically (or by KINC) each
time after the first. ITIME must be more than 1 for generation to
occur.
np1, np2, ninc
Generate keypoints from the pattern of keypoints beginning with NP1
to NP2 (defaults to NP1) in steps of NINC (defaults to 1). If NP1
= ALL, NP2 and NINC are ignored and the pattern is all selected
keypoints [KSEL]. If NP1 is negative, NP2 and NINC are ignored and
the last |NP1| keypoints (in sequence from the highest keypoint
number) are used as the pattern to be repeated. If NP1 = P,
graphical picking is enabled and all remaining command fields are
ignored (valid only in the GUI). A component name may also be
substituted for NP1 (NP2 and NINC are ignored).
dx, dy, dz
Keypoint location increments in the active coordinate system (DR,
Dθ, DZ for cylindrical, DR, Dθ, DΦ for spherical).
kinc
Keypoint increment between generated sets. If zero, the lowest
available keypoint numbers are assigned [NUMSTR].
noelem
Specifies if elements and nodes are also to be generated:
0 - Generate nodes and point elements associated with the original keypoints, if
they exist.
1 - Do not generate nodes and elements.
imove
Specifies whether keypoints will be moved or newly defined:
0 - Generate additional keypoints as requested with the ITIME argument.
1 - Move original keypoints to new position retaining the same keypoint numbers
(ITIME, KINC, and NOELEM are ignored). Valid only if the old
keypoints are no longer needed at their original positions.
Corresponding meshed items are also moved if not needed at
their original position.
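Examples
--------
Repeat a keypoint pattern three times in total, offsetting X by 2
for each new set (a minimal sketch):
>>> k0 = mapdl.k('', 0, 0, 0)
>>> mapdl.kgen(3, k0, dx=2)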
Notes
-----
Generates additional keypoints (and corresponding mesh) from a given
keypoint pattern. The MAT, TYPE, REAL, and ESYS attributes are based
upon the keypoints in the pattern and not upon the current settings.
Generation is done in the active coordinate system. Keypoints in the
pattern may have been defined in any coordinate system. However, solid
modeling in a toroidal coordinate system is not recommended.
"""
command = f"KGEN,{itime},{np1},{np2},{ninc},{dx},{dy},{dz},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
def kl(self, nl1="", ratio="", nk1="", **kwargs) -> int:
"""Generates a keypoint at a specified location on an existing line.
APDL Command: KL
Parameters
----------
nl1
Number of the line. If negative, the direction of line
(as interpreted for RATIO) is reversed.
ratio
Ratio of line length to locate keypoint. Must be between
0.0 and 1.0. Defaults to 0.5 (divide the line in half).
nk1
Number to be assigned to keypoint generated at division
location (defaults to lowest available keypoint number
[NUMSTR]).
Returns
-------
int
Keypoint number of the generated keypoint.
Examples
--------
Create a keypoint on a line from (0, 0, 0) and (10, 0, 0)
>>> kp0 = (0, 0, 0)
>>> kp1 = (10, 0, 0)
>>> knum0 = mapdl.k("", *kp0)
>>> knum1 = mapdl.k("", *kp1)
>>> lnum = mapdl.l(knum0, knum1)
>>> lnum
1
Create a keypoint at the midpoint of that line (the generated
keypoint is expected to take the next available number, here 3):
>>> knum2 = mapdl.kl(lnum, 0.5)
>>> knum2
3
"""
msg = self.run(f"KL,{nl1},{ratio},{nk1}", **kwargs)
if msg:
res = re.search(r'KEYPOINT\s+(\d+)\s+', msg)
if res is not None:
return int(res.group(1))
def klist(self, np1="", np2="", ninc="", lab="", **kwargs):
"""Lists the defined keypoints or hard points.
APDL Command: KLIST
Parameters
----------
np1, np2, ninc
List keypoints from NP1 to NP2 (defaults to NP1) in steps of NINC
(defaults to 1). If NP1 = ALL (default), NP2 and NINC are ignored
and all selected keypoints [KSEL] are listed. If NP1 = P,
graphical picking is enabled and all remaining command fields are
ignored (valid only in the GUI). A component name may also be
substituted for NP1 (NP2 and NINC are ignored).
lab
Coordinate listing key:
(blank) - List all keypoint information.
COORD - Suppress all but the keypoint coordinates (shown to a higher degree of accuracy
than when displayed with all information).
HPT - List only hard point information.
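Examples
--------
List the coordinates of all selected keypoints (a minimal sketch):
>>> print(mapdl.klist('ALL', lab='COORD'))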
Notes
-----
Lists keypoints in the active display coordinate system [DSYS]. An
attribute (TYPE, MAT, REAL, or ESYS) listed as a zero is unassigned;
one listed as a positive value indicates that the attribute was
assigned with the KATT command (and will not be reset to zero if the
mesh is cleared); one listed as a negative value indicates that the
attribute was assigned using the attribute pointer [TYPE, MAT, REAL, or
ESYS] that was active during meshing (and will be reset to zero if the
mesh is cleared).
This command is valid in any processor.
"""
command = f"KLIST,{np1},{np2},{ninc},{lab}"
return self.run(command, **kwargs)
def kmodif(self, npt="", x="", y="", z="", **kwargs):
"""Modifies an existing keypoint.
APDL Command: KMODIF
Parameters
----------
npt
Modify coordinates of this keypoint. If NPT = ALL, modify
coordinates of all selected keypoints [KSEL]. If NPT = P,
graphical picking is enabled and all remaining command fields are
ignored (valid only in the GUI). A component name may also be
substituted for NPT.
x, y, z
Replace the previous coordinate values assigned to this keypoint
with these corresponding coordinate values. Values are interpreted
according to the active coordinate system (R, θ, Z for cylindrical,
R, θ,Φ for spherical). If X = P, graphical picking is used to
locate keypoint and Y and Z are ignored. A blank retains the
previous value. You cannot specify Y = P.
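Examples
--------
Move keypoint 1 to ``(0, 0, 5)`` (a minimal sketch assuming
keypoint 1 exists):
>>> mapdl.kmodif(1, 0, 0, 5)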
Notes
-----
Lines, areas, and volumes attached to the modified keypoint (if any)
must all be selected and will be redefined using the active coordinate
system. However, solid modeling in a toroidal coordinate system is not
recommended.
Caution: Redefined entities may be removed from any defined
components and assemblies. Nodes and elements will be automatically
cleared from any redefined keypoints, lines, areas, or volumes.
The KMODIF command moves keypoints for geometry modification without
validating underlying entities. To merge keypoints and update higher
order entities, issue the NUMMRG command instead.
"""
command = f"KMODIF,{npt},{x},{y},{z}"
return self.run(command, **kwargs)
def kmove(self, npt="", kc1="", x1="", y1="", z1="", kc2="", x2="", y2="",
z2="", **kwargs):
"""Calculates and moves a keypoint to an intersection.
APDL Command: KMOVE
Parameters
----------
npt
Move this keypoint. If NPT = P, graphical picking is enabled and
all remaining command fields are ignored (valid only in the GUI).
A component name may also be substituted for NPT.
kc1
First coordinate system number. Defaults to 0 (global Cartesian).
x1, y1, z1
Input one or two values defining the location of the keypoint in
this coordinate system. Input "U" for unknown value(s) to be
calculated and input "E" to use an existing coordinate value.
Fields are R1, θ1, Z1 for cylindrical, or R1, θ1, ϕ1 for spherical.
kc2
Second coordinate system number.
x2, y2, z2
Input two or one value(s) defining the location of the keypoint in
this coordinate system. Input "U" for unknown value(s) to be
calculated and input "E" to use an existing coordinate value.
Arguments are R2, θ2, Z2 for cylindrical, or R2, θ2, ϕ2 for
spherical.
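Examples
--------
Move keypoint 1 to the intersection of the global Cartesian Z = 0
plane and the R = 5, θ = 30 surfaces of cylindrical system 1 (a
minimal sketch; three of the six constants are known and the rest
are flagged unknown with "U"):
>>> mapdl.kmove(1, 0, 'U', 'U', 0, 1, 5, 30, 'U')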
Notes
-----
Calculates and moves a keypoint to an intersection location. The
keypoint must have been previously defined (at an approximate location)
or left undefined (in which case it is internally defined at the SOURCE
location). The actual location is calculated from the intersection of
three surfaces (implied from three coordinate constants in two
different coordinate systems). Note that solid modeling in a toroidal
coordinate system is not recommended. See the MOVE command for surface
and intersection details. The three (of six) constants easiest to
define should be used. The program will calculate the remaining three
coordinate constants. All arguments, except KC1, must be input. Use
the repeat command [*REPEAT] after the KMOVE command to move a series
of keypoints, if desired.
"""
command = f"KMOVE,{npt},{kc1},{x1},{y1},{z1},{kc2},{x2},{y2},{z2}"
return self.run(command, **kwargs)
def knode(self, npt="", node="", **kwargs) -> int:
"""Defines a keypoint at an existing node location.
APDL Command: KNODE
Parameters
----------
npt
Arbitrary reference number for keypoint. If zero, the
lowest available number is assigned [NUMSTR].
node
Node number defining global X, Y, Z keypoint location. A
component name may also be substituted for NODE.
Returns
-------
int
Keypoint number of the generated keypoint.
Examples
--------
Create a keypoint at a node at (1, 2, 3)
>>> nnum = mapdl.n('', 1, 2, 3)
>>> knum1 = mapdl.knode('', nnum)
>>> knum1
1
"""
msg = self.run(f"KNODE,{npt},{node}", **kwargs)
if msg:
res = re.search(r'KEYPOINT NUMBER =\s+(\d+)', msg)
if res is not None:
return int(res.group(1))
def kplot(self, np1="", np2="", ninc="", lab="", **kwargs):
"""Displays the selected keypoints.
APDL Command: KPLOT
Parameters
----------
np1, np2, ninc
Display keypoints from NP1 to NP2 (defaults to NP1) in steps of
NINC (defaults to 1). If NP1 = ALL (default), NP2 and NINC are
ignored and all selected keypoints [KSEL] are displayed.
lab
Determines what keypoints are plotted (one of the following):
(blank) - Plots all keypoints.
HPT - Plots only those keypoints that are hard points.
Notes
-----
This command is valid in any processor.
"""
command = f"KPLOT,{np1},{np2},{ninc},{lab}"
return self.run(command, **kwargs)
def kpscale(self, np1="", np2="", ninc="", rx="", ry="", rz="", kinc="",
noelem="", imove="", **kwargs):
"""Generates a scaled set of (meshed) keypoints from a pattern of
APDL Command: KPSCALE
keypoints.
Parameters
----------
np1, np2, ninc
Set of keypoints (NP1 to NP2 in steps of NINC) that defines the
pattern to be scaled. NP2 defaults to NP1, NINC defaults to 1. If
NP1 = ALL, NP2 and NINC are ignored and the pattern is defined by
all selected keypoints. If NP1 = P, graphical picking is enabled
and all remaining command fields are ignored (valid only in the
GUI). A component name may also be substituted for NP1 (NP2 and
NINC are ignored).
rx, ry, rz
Scale factors to be applied to the X, Y, Z keypoint coordinates in
the active coordinate system (RR, Rθ, RZ for cylindrical; RR, Rθ,
RΦ for spherical). The Rθ and RΦ scale factors are interpreted as
angular offsets. For example, if CSYS = 1, an RX, RY, RZ input of
(1.5,10,3) would scale the specified keypoints 1.5 times in the
radial and 3 times in the Z direction, while adding an offset of 10
degrees to the keypoints. Zero, blank, or negative scale factor
values are assumed to be 1.0. Zero or blank angular offsets have
no effect.
kinc
Increment to be applied to the keypoint numbers for generated set.
If zero, the lowest available keypoint numbers will be assigned
[NUMSTR].
noelem
Specifies whether nodes and elements are also to be generated:
0 - Nodes and point elements associated with the original keypoints will be
generated (scaled) if they exist.
1 - Nodes and point elements will not be generated.
imove
Specifies whether keypoints will be moved or newly defined:
0 - Additional keypoints will be generated.
1 - Original keypoints will be moved to new position (KINC and NOELEM are ignored).
Use only if the old keypoints are no longer needed at their
original positions. Corresponding meshed items are also moved
if not needed at their original position.
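Examples
--------
Scale all selected keypoints by 2 in X and Y (a minimal sketch):
>>> mapdl.kpscale('ALL', rx=2, ry=2)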
Notes
-----
Generates a scaled set of keypoints (and corresponding mesh) from a
pattern of keypoints. The MAT, TYPE, REAL, and ESYS attributes are
based on the keypoints in the pattern and not the current settings.
Scaling is done in the active coordinate system. Keypoints in the
pattern could have been generated in any coordinate system. However,
solid modeling in a toroidal coordinate system is not recommended.
"""
command = f"KPSCALE,{np1},{np2},{ninc},{rx},{ry},{rz},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
def kscale(self, kinc="", np1="", np2="", ninc="", rx="", ry="", rz="",
**kwargs):
"""Generates a scaled pattern of keypoints from a given keypoint pattern.
APDL Command: KSCALE
Parameters
----------
kinc
Do this scaling operation one time, incrementing all keypoints in
the given pattern by KINC. If KINC = 0, keypoints will be
redefined at the scaled locations.
np1, np2, ninc
Scale keypoints from pattern beginning with NP1 to NP2 (defaults to
NP1) in steps of NINC (defaults to 1). If NP1 = ALL, NP2 and NINC
are ignored and pattern is all selected keypoints [KSEL]. If NP1 =
P, graphical picking is enabled and all remaining command fields
are ignored (valid only in the GUI). A component name may also be
substituted for NP1 (NP2 and NINC are ignored).
rx, ry, rz
Scale factor ratios. Scaling is relative to the origin of the
active coordinate system (RR, Rθ, RZ for cylindrical, RR, Rθ, RΦ
for spherical). If > 1.0, pattern is enlarged. If < 1.0, pattern
is reduced. Ratios each default to 1.0.
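Examples
--------
Double the size of the pattern formed by keypoints 1 through 5,
redefining them at the scaled locations (a minimal sketch):
>>> mapdl.kscale(0, 1, 5, 1, 2, 2, 2)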
Notes
-----
Generates a scaled pattern of keypoints from a given keypoint pattern.
Scaling is done in the active coordinate system (see analogous node
scaling [NSCALE]). Solid modeling in a toroidal coordinate system is
not recommended.
"""
command = f"KSCALE,{kinc},{np1},{np2},{ninc},{rx},{ry},{rz}"
return self.run(command, **kwargs)
def ksum(self, **kwargs):
"""Calculates and prints geometry statistics of the selected keypoints.
APDL Command: KSUM
Notes
-----
Calculates and prints geometry statistics (centroid location, moments
of inertia, etc.) associated with the selected keypoints. Geometry
items are reported in the global Cartesian coordinate system. A unit
density is assumed, irrespective of any material associations [KATT,
MAT]. Items calculated by KSUM and later retrieved by a *GET or *VGET
command are valid only if the model is not modified after the KSUM
command is issued.
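Examples
--------
Print geometry statistics for the selected keypoints (a minimal
sketch):
>>> print(mapdl.ksum())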
"""
command = f"KSUM,"
return self.run(command, **kwargs)
def ksymm(self, ncomp="", np1="", np2="", ninc="", kinc="", noelem="",
imove="", **kwargs):
"""Generates a reflected set of keypoints.
APDL Command: KSYMM
Parameters
----------
ncomp
Symmetry key:
X - X (or R) symmetry (default).
Y - Y (or θ) symmetry.
Z - Z (or Φ) symmetry.
np1, np2, ninc
Reflect keypoints from pattern beginning with NP1 to NP2 (defaults
to NP1) in steps of NINC (defaults to 1). If NP1 = ALL, NP2 and
NINC are ignored and pattern is all selected keypoints [KSEL]. If
Ncomp = P, graphical picking is enabled and all remaining command
fields are ignored (valid only in the GUI). A component name may
also be substituted for NP1 (NP2 and NINC are ignored).
kinc
Keypoint increment between sets. If zero, the lowest available
keypoint numbers are assigned [NUMSTR].
noelem
Specifies whether nodes and elements are also to be generated:
0 - Generate nodes and point elements associated with the original keypoints, if
they exist.
1 - Do not generate nodes and elements.
imove
Specifies whether keypoints will be moved or newly defined:
0 - Generate additional keypoints.
1 - Move original keypoints to new position retaining the same keypoint numbers
(KINC and NOELEM are ignored). Valid only if the old keypoints
are no longer needed at their original positions.
Corresponding meshed items are also moved if not needed at
their original position.
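Examples
--------
Reflect all selected keypoints by reversing the sign of the X
coordinate (a minimal sketch):
>>> mapdl.ksymm('X', 'ALL')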
Notes
-----
Generates a reflected set of keypoints (and corresponding mesh) from a
given keypoint pattern by a symmetry reflection (see analogous node
symmetry command, NSYM). The MAT, TYPE, REAL, and ESYS attributes are
based upon the keypoints in the pattern and not upon the current
settings. Reflection is done in the active coordinate system by
changing a particular coordinate sign. Keypoints in the pattern may
have been generated in any coordinate system. However, solid modeling
in a toroidal coordinate system is not recommended.
"""
command = f"KSYMM,{ncomp},{np1},{np2},{ninc},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
def ktran(self, kcnto="", np1="", np2="", ninc="", kinc="", noelem="",
imove="", **kwargs):
"""Transfers a pattern of keypoints to another coordinate system.
APDL Command: KTRAN
Parameters
----------
kcnto
Reference number of coordinate system where the pattern is to be
transferred. Transfer occurs from the active coordinate system.
np1, np2, ninc
Transfer keypoints from pattern beginning with NP1 to NP2 (defaults
to NP1) in steps of NINC (defaults to 1). If NP1 = ALL, NP2 and
NINC are ignored and pattern is all selected keypoints [KSEL]. If
NP1 = P, graphical picking is enabled and all remaining command
fields are ignored (valid only in the GUI). A component name may
also be substituted for NP1 (NP2 and NINC are ignored).
kinc
Keypoint increment between sets. If zero, the lowest available
keypoint numbers are assigned [NUMSTR].
noelem
Specifies whether nodes and elements are also to be generated:
0 - Generate nodes and point elements associated with the original keypoints, if
they exist.
1 - Do not generate nodes and elements.
imove
Specifies whether keypoints will be moved or newly defined:
0 - Generate additional keypoints.
1 - Move original keypoints to new position retaining the same keypoint numbers
(KINC and NOELEM are ignored). Valid only if the old keypoints
are no longer needed at their original positions.
Corresponding meshed items are also moved if not needed at
their original position.
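Examples
--------
Transfer keypoints 1 through 10 into coordinate system 11 (a
minimal sketch assuming coordinate system 11 has been defined):
>>> mapdl.ktran(11, 1, 10)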
Notes
-----
Transfers a pattern of keypoints (and corresponding mesh) from one
coordinate system to another (see analogous node transfer command,
TRANSFER). The MAT, TYPE, REAL, and ESYS attributes are based upon the
keypoints in the pattern and not upon the current settings. Coordinate
systems may be translated and rotated relative to each other. Initial
pattern may be generated in any coordinate system. Coordinate values
are interpreted in the active coordinate system and are transferred
directly. Solid modeling in a toroidal coordinate system is not
recommended.
"""
command = f"KTRAN,{kcnto},{np1},{np2},{ninc},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
def source(self, x="", y="", z="", **kwargs):
"""Defines a default location for undefined nodes or keypoints.
APDL Command: SOURCE
Parameters
----------
x, y, z
Global Cartesian coordinates for source nodes or keypoints
(defaults to the origin).
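Examples
--------
Place undefined keypoints moved by KMOVE at ``(1, 2, 3)`` by
default (a minimal sketch):
>>> mapdl.source(1, 2, 3)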
Notes
-----
Defines a global Cartesian location for undefined nodes or keypoints
moved during intersection calculations [MOVE or KMOVE].
"""
command = f"SOURCE,{x},{y},{z}"
return self.run(command, **kwargs)
```
#### File: _commands/preproc/material_data_tables.py
```python
class MaterialDataTables:
def tb(self, lab="", mat="", ntemp="", npts="", tbopt="", eosopt="",
funcname="", **kwargs):
"""APDL Command: TB
Activates a data table for material properties or special element
input.
Parameters
----------
lab
Material model data table type:
AFDM - Acoustic frequency-dependent material.
AHYPER - Anisotropic hyperelasticity.
ANEL - Anisotropic elasticity.
ANISO - Anisotropic plasticity.
BB - Bergstrom-Boyce.
BH - Magnetic field data.
BISO - Bilinear isotropic hardening using von Mises or Hill plasticity.
BKIN - Bilinear kinematic hardening using von Mises or Hill plasticity.
CAST - Cast iron.
CDM - Mullins effect (for isotropic hyperelasticity models).
CGCR - Fracture criterion for crack-growth simulation (CGROW).
CHABOCHE - Chaboche nonlinear kinematic hardening using von Mises or Hill plasticity.
COMP - Composite damage (explicit dynamic analysis).
CONCR - Concrete element data.
CREEP - Creep. Pure creep, creep with isotropic hardening plasticity, or creep with
kinematic hardening plasticity using both von Mises or Hill
potentials.
CTE - Secant coefficient of thermal expansion.
CZM - Cohesive zone.
DISCRETE - Explicit spring-damper (discrete).
DMGE - Damage evolution law.
DMGI - Damage initiation criteria.
DP - Classic Drucker-Prager plasticity.
DPER - Anisotropic electric permittivity.
EDP - Extended Drucker-Prager (for granular materials such as rock, concrete, soil,
ceramics and other pressure-dependent materials).
ELASTIC - Elasticity. For full harmonic analyses, properties can be defined as frequency-
or temperature-dependent (TBFIELD).
EOS - Equation of state (explicit dynamic analysis).
EVISC - Viscoelastic element data (explicit dynamic analysis).
EXPE - Experimental data.
FCON - Fluid conductance data (explicit dynamic analysis).
FCLI - Material strength limits for calculating failure criteria.
FLUID - Fluid.
FOAM - Foam (explicit dynamic analysis).
FRIC - Coefficient of friction based on Coulomb's Law or user-defined friction.
GASKET - Gasket.
GCAP - Geological cap (explicit dynamic analysis).
GURSON - Gurson pressure-dependent plasticity for porous metals.
HFLM - Film coefficient data.
HILL - Hill anisotropy. When combined with other material options, simulates
plasticity, viscoplasticity, and creep -- all with the Hill
potential.
HONEY - Honeycomb (explicit dynamic analysis).
HYPER - Hyperelasticity material models (Arruda-Boyce, Blatz-Ko, Extended Tube, Gent,
Mooney-Rivlin [default], Neo-Hookean, Ogden, Ogden Foam,
Polynomial Form, Response Function, Yeoh, and user-
defined).
"""
command = "TB,%s,%s,%s,%s,%s,%s,%s" % (str(lab), str(mat), str(
ntemp), str(npts), str(tbopt), str(eosopt), str(funcname))
return self.run(command, **kwargs)
def tbcopy(self, lab="", matf="", matt="", **kwargs):
"""APDL Command: TBCOPY
Copies a data table from one material to another.
Parameters
----------
lab
Data table label. See the TB command for valid labels, and see
"Notes" for Lab = ALL.
matf
Material reference number where data table is to be copied from.
matt
Material reference number where data table is to be copied to.
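Examples
--------
Copy all nonlinear data tables from material 1 to material 2 (a
minimal sketch):
>>> mapdl.tbcopy('ALL', 1, 2)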
Notes
-----
The TBCOPY command, with Lab = ALL, copies all of the nonlinear data
defined by the TB command. If you copy a model that includes both yield
behavior constants and linear constants (for example, a BKIN model),
TBCOPY,ALL and MPCOPY are used together to copy the entire model. All
input data associated with the model is copied, that is, all data
defined through the TB and MP commands.
Also, if you copy a material model using the Material Model Interface
(Edit> Copy), both the commands TBCOPY,ALL and MPCOPY are issued,
regardless of whether the model includes linear constants only, or if
it includes a combination of linear and yield behavior constants.
This command is also valid in SOLUTION.
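Examples
--------
A minimal sketch: copy all nonlinear data tables from material 1 to
material 2.
>>> mapdl.tbcopy("ALL", 1, 2)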
"""
command = "TBCOPY,%s,%s,%s" % (str(lab), str(matf), str(matt))
return self.run(command, **kwargs)
def tbdata(self, stloc="", c1="", c2="", c3="", c4="", c5="", c6="",
**kwargs):
"""APDL Command: TBDATA
Defines data for the material data table.
Parameters
----------
stloc
Starting location in table for entering data. For example, if
STLOC = 1, data input in the C1 field applies to the first table
constant, C2 applies to the second table constant, etc. If
STLOC=5, data input in the C1 field applies to the fifth table
constant, etc. Defaults to the last location filled + 1. The last
location is reset to 1 with each TB or TBTEMP command.
c1, c2, c3, . . . , c6
Data values assigned to six locations starting with STLOC. If a
value is already in this location, it is redefined. A blank value
leaves the existing value unchanged.
Notes
-----
Defines data for the table specified on the last TB command at the
temperature specified on the last TBTEMP command (if applicable). The
type of data table specified in the last TB command determines the
number of data values needed in TBDATA. Data values are linearly
interpolated for temperatures that fall between user defined TBTEMP
values. See Material Models in the Material Reference for the number of
data values required for different material behavior options.
This command is also valid in SOLUTION.
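Examples
--------
A minimal sketch (illustrative values): fill the first two constants
of the active BISO table, starting at location 1.
>>> mapdl.tb("BISO", 1)
>>> mapdl.tbdata(1, 250e6, 2e9)  # yield stress, tangent modulus (illustrative)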
"""
command = "TBDATA,%s,%s,%s,%s,%s,%s,%s" % (
str(stloc), str(c1), str(c2), str(c3), str(c4), str(c5), str(c6))
return self.run(command, **kwargs)
def tbdele(self, lab="", mat1="", mat2="", inc="", **kwargs):
"""APDL Command: TBDELE
Deletes previously defined material data tables.
Parameters
----------
lab
Data table label. (See the TB command for valid labels.) If ALL,
delete all data tables.
mat1, mat2, inc
Delete tables for materials MAT1 to (MAT2 defaults to MAT1) in
steps of INC (defaults to 1). If MAT1= ALL, ignore MAT2 and INC
and delete data tables for all materials.
Notes
-----
This command is also valid in SOLUTION.
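Examples
--------
A minimal sketch: delete the BISO data tables of materials 1 through 3.
>>> mapdl.tbdele("BISO", 1, 3)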
"""
command = "TBDELE,%s,%s,%s,%s" % (str(lab), str(mat1), str(mat2), str(inc))
return self.run(command, **kwargs)
def tbeo(self, par="", value="", **kwargs):
"""APDL Command: TBEO
Sets special options or parameters for material data tables.
Parameters
----------
par
Parameter name:
CAPCREEPREG - Available for the viscoplasticity/creep model (TB,CREEP), allows two creep
models to be specified via the same material ID when
used with the Extended Drucker-Prager model (TB,EDP).
value
Parameter value:
SHEA - Use the shear stress-state creep model with the Extended Drucker-Prager model.
Valid only when Par = CAPCREEPREG.
COMP - Use the compaction stress-state creep model with the Extended Drucker-Prager
model. Valid only when Par = CAPCREEPREG.
Notes
-----
Issue the TBEO command after activating the data table (TB) but before
defining data for the table (TBDATA) or a point on a nonlinear data
curve (TBPT).
"""
command = "TBEO,%s,%s" % (str(par), str(value))
return self.run(command, **kwargs)
def tbfield(self, type_="", value="", **kwargs):
"""APDL Command: TBFIELD
Defines values of field variables for material data tables.
Parameters
----------
type_
Type of field variable:
FREQ - A frequency is to be specified in Value
TEMP - A temperature is to be specified in Value
TIME - A time is to be specified in Value
NPRES - A normal pressure is to be specified in Value
SLDA - A total sliding distance (algebraic) is to be specified in Value
SLDI - A total sliding distance (absolute) is to be specified in Value
SLRV - A sliding velocity is to be specified in Value
CYCLE - A healing cycle number is to be specified in Value
UFXX - User-defined field variable (UF01,UF02, ..., UF09)
value
The field value to be referenced (use this command multiple times
to enter values of different field variables).
Notes
-----
Define your data tables as field-variable-dependent (via the
appropriate TB command shown below), then issue the TBFIELD command to
define the field values.
Define data values in ascending order for all field quantities. If a
field value is to be held constant, define it only once; subsequent
definitions are ignored.
There is no limit on the number of values you can specify. The
specified field value remains active until the next TBFIELD command is
input.
After you have defined the field value(s), define your data for the
data tables (TBDATA).
See Understanding Field Variables in the Material Reference for more
information about the interpolation scheme used for field-dependent
material properties.
See Full Harmonic Analysis in the Structural Analysis Guide for more
information about using TBFIELD with TB,ELASTIC or TB,SDAMP.
The TBFIELD command supports the following material models (TB,Lab
commands):
The TEMP value specified on this command corresponds to the average
temperature on the contact surface for contact elements CONTA171,
CONTA172, CONTA173, CONTA174, CONTA175, CONTA176, and CONTA177. For
contact element CONTA178, the TEMP value corresponds to the average
temperature of the nodes.
The TIME value specified on this command corresponds to the analysis
time specified on the TIME command.
The algebraic sliding distance (SLDA) specified on this command is the
total sliding distance (the algebraic sum) as reported in the element
output definitions table for the contact elements (for example, TASS
and TASR output items for CONTA174).
The absolute sliding distance (SLDI) specified on this command is the
total accumulated sliding distance (the absolute sum) as reported in
the element output definitions table for the contact elements (for
example, AASS and AASR output items for CONTA174).
When used with TB,FRIC, field variables defined by TBFIELD are only
available for isotropic friction (TBOPT = ISO) and orthotropic friction
(TBOPT = ORTHO); they are not available for user-defined friction
(TBOPT = USER).
See Contact Friction in the Material Reference for more information
about using TBFIELD with TB,FRIC.
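Examples
--------
A minimal sketch (illustrative values): define isotropic elasticity as
frequency-dependent data at two frequencies.
>>> mapdl.tb("ELASTIC", 1, "", 2)
>>> mapdl.tbfield("FREQ", 100)
>>> mapdl.tbdata(1, 2.0e11, 0.3)   # modulus, Poisson's ratio at 100 Hz
>>> mapdl.tbfield("FREQ", 1000)
>>> mapdl.tbdata(1, 2.1e11, 0.3)   # modulus, Poisson's ratio at 1000 Hz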
"""
command = "TBFIELD,%s,%s" % (str(type_), str(value))
return self.run(command, **kwargs)
def tbin(self, oper="", par1="", par2="", par3="", par4="", **kwargs):
"""APDL Command: TBIN
Sets parameters used for interpolation of the material data tables.
Parameters
----------
oper
Operation to perform:
SCALE - Sets the scales (linear or logarithmic) used when interpolating
the material data table. This is the only valid operation.
par1
Independent variable, which can be any field variable specified via
the TBFIELD command.
par2
Index of any material parameter specified via the TBDATA command.
par3
Scale to be used for the independent variable. Valid options are
LINEAR (linear) or LOG (logarithmic).
par4
Scale to be used for the dependent variable (the material parameter
specified via Par2). Valid options are LINEAR (linear) or LOG
(logarithmic).
Notes
-----
For a list of the supported material data tables (TB), see Logarithmic
Interpolation and Scaling in the Material Reference.
"""
command = "TBIN,%s,%s,%s,%s,%s" % (
str(oper), str(par1), str(par2), str(par3), str(par4))
return self.run(command, **kwargs)
def tblist(self, lab="", mat="", **kwargs):
"""APDL Command: TBLIST
Lists the material data tables.
Parameters
----------
lab
Data table label. (See the TB command for valid labels.) Defaults
to the active table. If ALL, list data for all labels.
mat
Material number to be listed (defaults to the active material). If
ALL, list data tables for all materials.
Notes
-----
This command is a utility command, valid anywhere.
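Examples
--------
List every data table defined for every material.
>>> mapdl.tblist("ALL", "ALL")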
"""
command = "TBLIST,%s,%s" % (str(lab), str(mat))
return self.run(command, **kwargs)
def tbmodif(self, row="", col="", value="", **kwargs):
"""APDL Command: TBMODIF
Modifies data for the material data table (GUI).
Parameters
----------
row, col
The row and column numbers of the table entry to be modified.
value
The new value to be used in the ROW, COL location.
Notes
-----
The TBMODIF command modifies data for the table specified on the last
TB command.
For temperature-dependent data, the temperature specified on the last
TBTEMP command is used.
TBMODIF is a command generated by the Graphical User Interface (GUI).
It appears in the log file (Jobname.LOG) if a TB material data table is
graphically edited in spreadsheet fashion.
The TBMODIF command is not intended to be typed in directly during an
analysis session (although it can be included in an input file for
batch input or for use with the /INPUT command).
This command is also valid in SOLUTION.
"""
command = "TBMODIF,%s,%s,%s" % (str(row), str(col), str(value))
return self.run(command, **kwargs)
def tbplot(self, lab="", mat="", tbopt="", temp="", segn="", **kwargs):
"""APDL Command: TBPLOT
Displays the material data table.
Parameters
----------
lab
Data table label. Valid labels are: MKIN, KINH, MELAS, MISO,
BKIN, BISO, BH, GASKET, and JOIN. Defaults to the active table
label. For B-H data, also valid are: NB to display NU-B2, MH to
display MU vs. H, and SBH, SNB, SMH to display the slopes of the
corresponding data.
mat
Material number to be displayed (defaults to the active material).
tbopt
Gasket material or joint element material option to be plotted.
ALL - Plots all gasket data.
COMP - Plots gasket compression data only.
LUNL - Plots gasket linear unloading data with compression curve.
NUNL - Plots gasket nonlinear unloading data only.
temp
Specific temperature at which gasket data or joint element material
data will be plotted (used only when Lab = GASKET or JOIN). Use
TEMP = ALL to plot gasket data or joint element material data at
all temperatures.
segn
Segment number of plotted curve (valid only when Lab = GASKET):
NO - Segment number is not added to plotted curve (default).
YES - Segment number is added to plotted curve. This option is ignored if the number
of data points in a curve exceeds 20.
Notes
-----
Only data for stress-strain, B-H, gasket curves, or joint element
nonlinear material model curves can be displayed.
The TBOPT and TEMP values are valid only when Lab = GASKET or JOIN.
The SEGN value is valid only when Lab = GASKET.
This command is valid in any processor.
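Examples
--------
A minimal sketch: display the BISO stress-strain curve of material 1.
>>> mapdl.tbplot("BISO", 1)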
"""
command = "TBPLOT,%s,%s,%s,%s,%s" % (
str(lab), str(mat), str(tbopt), str(temp), str(segn))
return self.run(command, **kwargs)
def tbpt(self, oper="", x1="", x2="", x3="", xn="", **kwargs):
"""APDL Command: TBPT
Defines a point on a nonlinear data curve.
Parameters
----------
oper
Operation to perform:
DEFI - Defines a new data point (default). The point is inserted into the table in
ascending order of X1. If a point already exists with the
same X1 value, it is replaced.
DELE - Deletes an existing point. The X1 value must match the X1 value of the point
to be deleted (XN is ignored).
x1, x2, ..., xn
The N components of the point. N depends on the type of data table.
Except for TB,EXPE, all other TB tables support only two components.
Notes
-----
TBPT defines a point on a nonlinear data curve (such as a stress-strain
curve, B-H curve, etc.) at the temperature specified on the last TBTEMP
command. The meaning of the values depends on the type of data table
specified on the last TB command (MISO, BH, etc.).
This command is also valid in SOLUTION.
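Examples
--------
A minimal sketch (illustrative strain/stress pairs): build a
multilinear isotropic hardening (MISO) curve point by point.
>>> mapdl.tb("MISO", 1, 1, 3)
>>> mapdl.tbpt("DEFI", 0.001, 200e6)
>>> mapdl.tbpt("DEFI", 0.01, 240e6)
>>> mapdl.tbpt("DEFI", 0.05, 260e6)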
"""
command = "TBPT,%s,%s,%s,%s,%s" % (
str(oper), str(x1), str(x2), str(x3), str(xn))
return self.run(command, **kwargs)
def tbtemp(self, temp="", kmod="", **kwargs):
"""APDL Command: TBTEMP
Defines a temperature for a material data table.
Parameters
----------
temp
Temperature value (defaults to 0.0 if KMOD is blank).
kmod
If blank, TEMP defines a new temperature. (Issue TBLIST to list
temperatures and data.)
Notes
-----
The TBTEMP command defines a temperature to be associated with the data
on subsequent TBPT or TBDATA commands.
The defined temperature remains active until the next TBTEMP command is
issued.
Data values must be defined with the temperatures in ascending order.
This command is also valid in SOLUTION.
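Examples
--------
A minimal sketch (illustrative values): define BISO constants at two
temperatures, entered in ascending order.
>>> mapdl.tb("BISO", 1, 2)
>>> mapdl.tbtemp(20)
>>> mapdl.tbdata(1, 250e6, 2e9)    # constants at 20 degrees
>>> mapdl.tbtemp(500)
>>> mapdl.tbdata(1, 180e6, 1.5e9)  # constants at 500 degrees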
"""
command = "TBTEMP,%s,%s" % (str(temp), str(kmod))
return self.run(command, **kwargs)
```
#### File: _commands/preproc/volumes.py
```python
from typing import Optional, Union
from ansys.mapdl.core.mapdl_types import MapdlInt, MapdlFloat
from ansys.mapdl.core._commands import parse
class Volumes:
def extopt(self, lab: str = "", val1: Union[str, int] = "",
val2: Union[str, int] = "", val3: MapdlInt = "",
val4: MapdlInt = "", **kwargs) -> Optional[str]:
"""Controls options relating to the generation of volume elements from area elements.
APDL Command: EXTOPT
Parameters
----------
lab
Label identifying the control option. The meanings of
Val1, Val2, and Val3 will vary depending on Lab.
ON - Sets carryover of the material attributes, real
constant attributes, and element coordinate system
attributes of the pattern area elements to the
generated volume elements. Sets the pattern
area mesh to clear when volume generations are done.
Val1, Val2, and Val3 are ignored.
OFF - Removes all settings associated with this command.
Val1, Val2, and Val3 are ignored.
STAT - Shows all settings associated with this command.
Val1, Val2, Val3, and Val4 are ignored.
ATTR - Sets carryover of particular pattern area attributes
(materials, real constants, and element coordinate
systems) of the pattern area elements to the
generated volume elements. (See 2.) Val1 can be:
0 - Sets volume elements to use
current MAT command settings.
1 - Sets volume elements to use material
attributes of the pattern area elements.
Val2 can be:
0 - Sets volume elements to use current REAL
command settings.
1 - Sets volume elements to use real constant attributes
of the pattern area elements.
Val3 can be:
0 - Sets volume elements to use current ESYS command
settings.
1 - Sets volume elements to use element coordinate
system attributes of the pattern area elements.
Val4 can be:
0 - Sets volume elements to use current SECNUM command
settings.
1 - Sets volume elements to use section attributes of
the pattern area elements.
ESIZE - Val1 sets the number of element divisions in the
direction of volume generation or volume sweep.
For VDRAG and VSWEEP, Val1 is overridden by the
LESIZE command NDIV setting. Val2 sets the spacing
ratio (bias) in the direction of volume generation
or volume sweep. If positive, Val2 is the nominal
ratio of last division size to first division size
(if > 1.0, sizes increase, if < 1.0, sizes
decrease). If negative, Val2 is the nominal ratio of
center division(s) size to end divisions size. Ratio
defaults to 1.0 (uniform spacing).
Val3 and Val4 are ignored.
ACLEAR - Sets clearing of pattern area mesh.
(See 3.) Val1 can be:
0 - Sets pattern area to remain meshed when volume
generation is done.
1 - Sets pattern area mesh to clear when volume
generation is done. Val2, Val3, and Val4 are
ignored.
VSWE - Indicates that volume sweeping options will be set
using Val1 and Val2. Settings specified with EXTOPT,
VSWE will be used the next time the VSWEEP command
is invoked. If Lab = VSWE, Val1 becomes a label.
Val1 can be:
AUTO - Indicates whether you will be prompted for the source
and target used by VSWEEP or if VSWE should
automatically determine the source and target.
If Val1 = AUTO, Val2 is ON by default. VSWE will
automatically determine the source and target for
VSWEEP. You will be allowed to pick more than one
volume for sweeping. When Val2 = OFF, the user will
be prompted for the source and target for VSWEEP.
You will only be allowed to pick one volume for
sweeping.
TETS - Indicates whether VSWEEP will tet mesh non-sweepable
volumes or leave them unmeshed. If Val1 = TETS,
Val2 is OFF by default. Non-sweepable volumes will be
left unmeshed. When Val2 = ON, the non-sweepable
volumes will be tet meshed if the assigned element
type supports tet shaped elements.
val1, val2, val3, val4
Additional input values as described under each option for
Lab.
Notes
-----
EXTOPT controls options relating to the generation of volume
elements from pattern area elements using the VEXT, VROTAT,
VOFFST, VDRAG, and VSWEEP commands. (When using VSWEEP,
the pattern area is referred to as the source area.)
Enables carryover of the attributes of the pattern area
elements to the generated volume elements when you are using
VEXT, VROTAT, VOFFST, or VDRAG. (When using VSWEEP, since the
volume already exists, use the VATT command to assign attributes
before sweeping.)
When you are using VEXT, VROTAT, VOFFST, or VDRAG, enables
clearing of the pattern area mesh when volume generations are
done. (When you are using VSWEEP, if selected, the area meshes
on the pattern (source), target, and/or side areas clear when
volume sweeping is done.)
Neither EXTOPT,VSWE,AUTO nor EXTOPT,VSWE,TETS will be affected
by EXTOPT,ON or EXTOPT,OFF.
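Examples
--------
A minimal sketch: request 10 element divisions through the generated
volume and carry the pattern-area attributes over to the volume
elements.
>>> mapdl.extopt("ESIZE", 10)
>>> mapdl.extopt("ATTR", 1, 1, 1)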
"""
command = f"EXTOPT,{lab},{val1},{val2},{val3},{val4}"
return self.run(command, **kwargs)
def v(self, p1="", p2="", p3="", p4="", p5="", p6="", p7="", p8="",
**kwargs) -> int:
"""Define a volume through keypoints.
APDL Command: V
Parameters
----------
p1 : int, optional
Keypoint defining starting corner of volume.
p2 : int, optional
Keypoint defining second corner of volume.
p3 : int, optional
Keypoint defining third corner of volume.
p4 : int, optional
Keypoint defining fourth corner of volume.
p5 : int, optional
Keypoint defining fifth corner of volume.
p6 : int, optional
Keypoint defining sixth corner of volume.
p7 : int, optional
Keypoint defining seventh corner of volume.
p8 : int, optional
Keypoint defining eighth corner of volume.
Returns
-------
int
Volume number of the generated volume.
Examples
--------
Create a simple cube volume.
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 1, 0, 0)
>>> k2 = mapdl.k("", 1, 1, 0)
>>> k3 = mapdl.k("", 0, 1, 0)
>>> k4 = mapdl.k("", 0, 0, 1)
>>> k5 = mapdl.k("", 1, 0, 1)
>>> k6 = mapdl.k("", 1, 1, 1)
>>> k7 = mapdl.k("", 0, 1, 1)
>>> v0 = mapdl.v(k0, k1, k2, k3, k4, k5, k6, k7)
>>> v0
1
Create a triangular prism
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 1, 0, 0)
>>> k2 = mapdl.k("", 1, 1, 0)
>>> k3 = mapdl.k("", 0, 1, 0)
>>> k4 = mapdl.k("", 0, 0, 1)
>>> k5 = mapdl.k("", 1, 0, 1)
>>> k6 = mapdl.k("", 1, 1, 1)
>>> k7 = mapdl.k("", 0, 1, 1)
>>> v1 = mapdl.v(k0, k1, k2, k2, k4, k5, k6, k6)
>>> v1
2
Create a tetrahedron
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 1, 0, 0)
>>> k2 = mapdl.k("", 1, 1, 0)
>>> k3 = mapdl.k("", 0, 0, 1)
>>> v2 = mapdl.v(k0, k1, k2, k2, k3, k3, k3, k3)
>>> v2
3
Notes
-----
Defines a volume (and its corresponding lines and areas)
through eight (or fewer) existing keypoints. Keypoints must
be input in a continuous order. The order of the keypoints
should be around the bottom and then the top. Missing lines
are generated "straight" in the active coordinate system and
assigned the lowest available numbers [NUMSTR]. Missing areas
are generated and assigned the lowest available numbers.
Solid modeling in a toroidal coordinate system is not recommended.
Certain faces may be condensed to a line or point by repeating
keypoints. For example, use V,P1,P2,P3,P3,P5,P6,P7,P7 for a
triangular prism or V,P1,P2,P3,P3,P5,P5,P5,P5 for a tetrahedron.
Using keypoints to produce partial sections in CSYS = 2 can generate
anomalies; check the resulting volumes carefully.
"""
command = f"V,{p1},{p2},{p3},{p4},{p5},{p6},{p7},{p8}"
return parse.parse_v(self.run(command, **kwargs))
def va(self, a1="", a2="", a3="", a4="", a5="", a6="", a7="", a8="", a9="",
a10="", **kwargs) -> int:
"""Generate a volume bounded by existing areas.
APDL Command: VA
Parameters
----------
a1, a2, a3, . . . , a10
List of areas defining volume. The minimum number of
areas is 4. If A1 = ALL, use all selected [ASEL] areas
and ignore A2 to A10. A component name may also be
substituted for A1.
Returns
-------
int
Volume number of the volume.
Examples
--------
Create a simple tetrahedron bounded by four areas.
>>> k0 = mapdl.k('', -1, 0, 0)
>>> k1 = mapdl.k('', 1, 0, 0)
>>> k2 = mapdl.k('', 1, 1, 0)
>>> k3 = mapdl.k('', 1, 0.5, 1)
>>> a0 = mapdl.a(k0, k1, k2)
>>> a1 = mapdl.a(k0, k1, k3)
>>> a2 = mapdl.a(k1, k2, k3)
>>> a3 = mapdl.a(k0, k2, k3)
>>> vnum = mapdl.va(a0, a1, a2, a3)
>>> vnum
1
Notes
-----
This command conveniently allows generating volumes from
regions having more than eight keypoints (which is not allowed
with the V command). Areas may be input in any order. The
exterior surface of a VA volume must be continuous, but holes
may pass completely through it.
"""
command = f"VA,{a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10}"
return parse.parse_v(self.run(command, **kwargs))
def vdele(self, nv1="", nv2="", ninc="", kswp="", **kwargs):
"""Deletes unmeshed volumes.
APDL Command: VDELE
Parameters
----------
nv1, nv2, ninc
Delete volumes from NV1 to NV2 (defaults to NV1) in steps of NINC
(defaults to 1). If NV1 = ALL, NV2 and NINC are ignored and all
selected volumes [VSEL] are deleted. If NV1 = P, graphical picking
is enabled and all remaining command fields are ignored (valid only
in the GUI). A component name may also be substituted for NV1 (NV2
and NINC are ignored).
kswp
Specifies whether keypoints, lines, and areas are also deleted:
0 - Delete volumes only (default).
1 - Delete volumes, as well as keypoints, lines, and areas attached to the
specified volumes but not shared by other volumes.
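Examples
--------
Delete a volume along with any keypoints, lines, and areas attached to
it but not shared by other volumes.
>>> vnum = mapdl.blc4(0, 0, 1, 1, depth=1)
>>> mapdl.vdele(vnum, kswp=1)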
"""
command = f"VDELE,{nv1},{nv2},{ninc},{kswp}"
return self.run(command, **kwargs)
def vdgl(self, nv1="", nv2="", ninc="", **kwargs):
"""Lists keypoints of a volume that lie on a parametric degeneracy.
APDL Command: VDGL
Parameters
----------
nv1, nv2, ninc
List keypoints that lie on a parametric degeneracy on volumes from
NV1 to NV2 (defaults to NV1) in steps of NINC (defaults to 1). If
NV1 = ALL (default), NV2 and NINC will be ignored and keypoints on
all selected volumes [VSEL] will be listed. If NV1 = P, graphical
picking is enabled and all remaining command fields are ignored
(valid only in the GUI). You may also substitute a component name
for NV1 (ignore NV2 and NINC).
Notes
-----
See the Modeling and Meshing Guide for details about parametric
degeneracies.
This command is valid in any processor.
"""
command = f"VDGL,{nv1},{nv2},{ninc}"
return self.run(command, **kwargs)
def vdrag(self, na1="", na2="", na3="", na4="", na5="", na6="", nlp1="",
nlp2="", nlp3="", nlp4="", nlp5="", nlp6="", **kwargs) -> str:
"""Generate volumes by dragging an area pattern along a path.
APDL Command: VDRAG
Parameters
----------
na1, na2, na3, . . . , na6
List of areas in the pattern to be dragged (6 maximum if
using keyboard entry). If NA1 = ALL, all selected areas
will be swept along the path. A component name may also
be substituted for NA1.
nlp1, nlp2, nlp3, . . . , nlp6
List of lines defining the path along which the pattern is
to be dragged (6 maximum if using keyboard entry). Must
be a continuous set of lines. To be continuous, adjacent
lines must share the connecting keypoint (the end keypoint
of one line must also be first keypoint of the next line).
Returns
-------
str
MAPDL command output.
Examples
--------
Create a square with a hole in it and drag it along an arc.
>>> anum0 = mapdl.blc4(0, 0, 1, 1)
>>> anum1 = mapdl.blc4(0.25, 0.25, 0.5, 0.5)
>>> aout = mapdl.asba(anum0, anum1)
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 1, 0, 1)
>>> k2 = mapdl.k("", 1, 0, 0)
>>> l0 = mapdl.larc(k0, k1, k2, 2)
>>> output = mapdl.vdrag(aout, nlp1=l0)
>>> print(output)
DRAG AREAS
3,
ALONG LINES
9
Notes
-----
Generates volumes (and their corresponding keypoints, lines,
and areas) by sweeping a given area pattern along a
characteristic drag path. If the drag path consists of
multiple lines, the drag direction is determined by the
sequence in which the path lines are input (NLP1, NLP2, etc.).
If the drag path is a single line (NLP1), the drag direction
is from the keypoint on the drag line that is closest to the
first keypoint of the given area pattern to the other end of
the drag line.
The magnitude of the vector between the keypoints of the given
pattern and the first path keypoint remains constant for all
generated keypoint patterns and the path keypoints. The
direction of the vector relative to the path slope also
remains constant so that patterns may be swept around curves.
Lines are generated with the same shapes as the given pattern
and the path lines.
Keypoint, line, area, and volume numbers are automatically
assigned (beginning with the lowest available values
[NUMSTR]). Adjacent lines use a common keypoint, adjacent
areas use a common line, and adjacent volumes use a common
area. For best results, the entities to be dragged should be
orthogonal to the start of the drag path. Drag operations
that produce an error message may create some of the desired
entities prior to terminating.
If element attributes have been associated with the input area
via the AATT command, the opposite area generated by the VDRAG
operation will also have those attributes (i.e., the element
attributes from the input area are copied to the opposite
area). Note that only the area opposite the input area will
have the same attributes as the input area; the areas adjacent
to the input area will not.
If the input areas are meshed or belong to a meshed volume,
the area(s) can be extruded to a 3-D mesh. Note that the NDIV
argument of the ESIZE command should be set before extruding
the meshed areas. Alternatively, mesh divisions can be
specified directly on the drag line(s) (LESIZE). See the
Modeling and Meshing Guide for more information.
You can use the VDRAG command to generate 3-D interface
element meshes for elements INTER194 and INTER195. When
generating interface element meshes using VDRAG, you must
specify the line divisions to generate one interface element
directly on the drag line using the LESIZE command. The
source area to be extruded becomes the bottom surface of the
interface element. Interface elements must be extruded in what
will become the element's local x direction, that is, bottom
to top.
"""
command = f"VDRAG,{na1},{na2},{na3},{na4},{na5},{na6},{nlp1},{nlp2},{nlp3},{nlp4},{nlp5},{nlp6}"
return self.run(command, **kwargs)
def vext(self, na1="", na2="", ninc="", dx="", dy="", dz="", rx="", ry="",
rz="", **kwargs) -> str:
"""Generate additional volumes by extruding areas.
APDL Command: VEXT
Parameters
----------
na1, na2, ninc
Set of areas (NA1 to NA2 in steps of NINC) that defines
the pattern to be extruded. NA2 defaults to NA1, NINC
defaults to 1. If NA1 = ALL, NA2 and NINC are ignored and
the pattern is defined by all selected areas. A component
name may also be substituted for NA1 (NA2 and NINC are
ignored).
dx, dy, dz
Increments to be applied to the X, Y, and Z keypoint
coordinates in the active coordinate system (DR, Dθ, DZ
for cylindrical; DR, Dθ, DΦ for spherical).
rx, ry, rz
Scale factors to be applied to the X, Y, and Z keypoint
coordinates in the active coordinate system (RR, Rθ, RZ
for cylindrical; RR, Rθ, RΦ for spherical). Note that the
Rθ and RΦ scale factors are interpreted as angular
offsets. For example, if CSYS = 1, RX, RY, RZ input of
(1.5,10,3) would scale the specified keypoints 1.5 times
in the radial and 3 times in the Z direction, while adding
an offset of 10 degrees to the keypoints. Zero, blank, or
negative scale factor values are assumed to be 1.0. Zero
or blank angular offsets have no effect.
Returns
-------
str
MAPDL command output.
Examples
--------
Create a basic cylinder by extruding a circle.
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 0, 0, 1)
>>> k2 = mapdl.k("", 0, 0, 0.5)
>>> carc0 = mapdl.circle(k0, 1, k1)
>>> a0 = mapdl.al(*carc0)
>>> mapdl.vext(a0, dz=4)
Create a tapered cylinder.
>>> mapdl.vdele('all')
>>> mapdl.vext(a0, dz=4, rx=0.3, ry=0.3, rz=1)
Notes
-----
Generates additional volumes (and their corresponding
keypoints, lines, and areas) by extruding and scaling a
pattern of areas in the active coordinate system.
If element attributes have been associated with the input area
via the AATT command, the opposite area generated by the VEXT
operation will also have those attributes (i.e., the element
attributes from the input area are copied to the opposite
area). Note that only the area opposite the input area will
have the same attributes as the input area; the areas adjacent
to the input area will not.
If the areas are meshed or belong to meshed volumes, a 3-D
mesh can be extruded with this command. Note that the NDIV
argument on the ESIZE command should be set before extruding
the meshed areas.
Scaling of the input areas, if specified, is performed first,
followed by the extrusion.
In a non-Cartesian coordinate system, the VEXT command locates
the end face of the volume based on the active coordinate
system. However, the extrusion is made along a straight line
between the end faces. Note that solid modeling in a toroidal
coordinate system is not recommended.
Caution: Use of the VEXT command can produce unexpected
results when operating in a non-Cartesian coordinate system.
For a detailed description of the possible problems that may
occur, see Solid Modeling in the Modeling and Meshing Guide.
"""
command = f"VEXT,{na1},{na2},{ninc},{dx},{dy},{dz},{rx},{ry},{rz}"
return self.run(command, **kwargs)
def vgen(self, itime="", nv1="", nv2="", ninc="", dx="", dy="", dz="",
kinc="", noelem="", imove="", **kwargs):
"""Generates additional volumes from a pattern of volumes.
APDL Command: VGEN
Parameters
----------
itime
Do this generation operation a total of ITIMEs, incrementing all
keypoints in the given pattern automatically (or by KINC) each time
after the first. ITIME must be > 1 for generation to occur.
nv1, nv2, ninc
Generate volumes from pattern beginning with NV1 to NV2 (defaults
to NV1) in steps of NINC (defaults to 1). If NV1 = ALL, NV2 and
NINC are ignored and the pattern is all selected volumes [VSEL].
If NV1 = P, graphical picking is enabled and all remaining command
fields are ignored (valid only in the GUI). A component name may
also be substituted for NV1 (NV2 and NINC are ignored).
dx, dy, dz
Keypoint location increments in the active coordinate system (--,
Dθ, DZ for cylindrical, --, Dθ, -- for spherical).
kinc
Keypoint increment between generated sets. If zero, the lowest
available keypoint numbers are assigned [NUMSTR].
noelem
Specifies if elements and nodes are also to be generated:
0 - Generate nodes and elements associated with the original volumes, if they
exist.
1 - Do not generate nodes and elements.
imove
Specifies whether to redefine the existing volumes:
0 - Generate additional volumes as requested with the ITIME argument.
1 - Move original volumes to new position retaining the same keypoint, line, and
area numbers (ITIME, KINC, and NOELEM are ignored).
Corresponding meshed items are also moved if not needed at
their original position.
Notes
-----
Generates additional volumes (and their corresponding keypoints, lines,
areas and mesh) from a given volume pattern. The MAT, TYPE, REAL, and
ESYS attributes are based upon the volumes in the pattern and not upon
the current settings of the pointers. End slopes of the generated
lines remain the same (in the active coordinate system) as those of the
given pattern. For example, radial slopes remain radial, etc.
Generations which produce volumes of a size or shape different from the
pattern (i.e., radial generations in cylindrical systems, radial and
phi generations in spherical systems, and theta generations in
elliptical systems) are not allowed. Note that solid modeling in a
toroidal coordinate system is not recommended. Volume, area, and line
numbers are automatically assigned (beginning with the lowest available
values [NUMSTR]).
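Examples
--------
A minimal sketch: generate two additional copies of a block, each
offset a further 2 units along X (ITIME = 3 counts the original
pattern).
>>> vnum = mapdl.blc4(0, 0, 1, 1, depth=1)
>>> mapdl.vgen(3, vnum, dx=2)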
"""
command = f"VGEN,{itime},{nv1},{nv2},{ninc},{dx},{dy},{dz},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
def vlist(self, nv1="", nv2="", ninc="", **kwargs):
"""Lists the defined volumes.
APDL Command: VLIST
Parameters
----------
nv1, nv2, ninc
List volumes from NV1 to NV2 (defaults to NV1) in steps of NINC
(defaults to 1). If NV1 = ALL (default), NV2 and NINC are ignored
and all selected volumes [VSEL] are listed. If NV1 = P, graphical
picking is enabled and all remaining command fields are ignored
(valid only in the GUI). A component name may also be substituted
for NV1 (NV2 and NINC are ignored).
Notes
-----
An attribute (TYPE, MAT, REAL, or ESYS) listed as a zero is unassigned;
one listed as a positive value indicates that the attribute was
assigned with the VATT command (and will not be reset to zero if the
mesh is cleared); one listed as a negative value indicates that the
attribute was assigned using the attribute pointer [TYPE, MAT, REAL, or
ESYS] that was active during meshing (and will be reset to zero if the
mesh is cleared). A "-1" in the "nodes" column indicates that the
volume has been meshed but there are no interior nodes. The volume
size is listed only if a VSUM command has been performed on the volume.
Volume orientation attributes (KZ1 and KZ2) are listed only if a
VEORIENT command was previously used to define an orientation for the
volume.
This command is valid in any processor.
"""
command = f"VLIST,{nv1},{nv2},{ninc}"
return self.run(command, **kwargs)
def vlscale(self, nv1="", nv2="", ninc="", rx="", ry="", rz="", kinc="",
noelem="", imove="", **kwargs):
"""Generates a scaled set of volumes from a pattern of volumes.
APDL Command: VLSCALE
Parameters
----------
nv1, nv2, ninc
Set of volumes (NV1 to NV2 in steps of NINC) that defines the
pattern to be scaled. NV2 defaults to NV1, NINC defaults to 1. If
NV1 = ALL, NV2 and NINC are ignored and the pattern is defined by
all selected volumes. If NV1 = P, graphical picking is enabled and
all remaining command fields are ignored (valid only in the GUI).
A component name may also be substituted for NV1 (NV2 and NINC are
ignored).
rx, ry, rz
Scale factors to be applied to the X, Y, and Z keypoint coordinates
in active coordinate system (RR, Rθ, RZ for cylindrical; RR, Rθ, RΦ
for spherical). Note that the Rθ and RΦ scale factors are
interpreted as angular offsets. For example, if CSYS = 1, RX, RY,
RZ input of (1.5,10,3) would scale the specified keypoints 1.5
times in the radial and 3 times in the Z direction, while adding an
offset of 10 degrees to the keypoints. Zero, blank, or negative
scale factor values are assumed to be 1.0. Zero or blank angular
offsets have no effect.
kinc
Increment to be applied to keypoint numbers for generated set. If
zero, the lowest available keypoint numbers will be assigned
[NUMSTR].
noelem
Specifies whether nodes and elements are also to be generated:
0 - Nodes and elements associated with the original volumes will be generated
(scaled) if they exist.
1 - Nodes and elements will not be generated.
imove
Specifies whether volumes will be moved or newly defined:
0 - Additional volumes will be generated.
1 - Original volumes will be moved to new position (KINC and NOELEM are ignored).
Use only if the old volumes are no longer needed at their
original positions. Corresponding meshed items are also moved
if not needed at their original position.
Notes
-----
Generates a scaled set of volumes (and their corresponding keypoints,
lines, areas, and mesh) from a pattern of volumes. The MAT, TYPE,
REAL, and ESYS attributes are based on the volumes in the pattern and
not the current settings. Scaling is done in the active coordinate
system. Volumes in the pattern could have been generated in any
coordinate system. However, solid modeling in a toroidal coordinate
system is not recommended.
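Examples
--------
A minimal sketch: generate a copy of a block scaled by 0.5 in each
direction.
>>> vnum = mapdl.blc4(0, 0, 1, 1, depth=1)
>>> mapdl.vlscale(vnum, rx=0.5, ry=0.5, rz=0.5)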
"""
command = f"VLSCALE,{nv1},{nv2},{ninc},{rx},{ry},{rz},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
def voffst(self, narea="", dist="", kinc="", **kwargs):
"""Generates a volume, offset from a given area.
APDL Command: VOFFST
Parameters
----------
narea
Area from which generated volume is to be offset. If NAREA = P,
graphical picking is enabled and all remaining command fields are
ignored (valid only in the GUI).
dist
Distance normal to given area at which keypoints for generated
volume are to be located. Positive normal is determined from the
right-hand rule keypoint order.
kinc
Increment to be applied to the keypoint numbers between sets. If
zero, keypoint numbers will be automatically assigned beginning
with the lowest available value [NUMSTR].
Notes
-----
Generates a volume (and its corresponding keypoints, lines, and areas)
by offsetting from an area. The direction of the offset varies with
the given area normal. End slopes of the generated lines remain the
same as those of the given pattern.
If element attributes have been associated with the input area via the
AATT command, the opposite area generated by the VOFFST operation will
also have those attributes (i.e., the element attributes from the input
area are copied to the opposite area). Note that only the area
opposite the input area will have the same attributes as the input
area; the areas adjacent to the input area will not.
If the areas are meshed or belong to meshed volumes, a 3-D mesh can be
extruded with this command. Note that the NDIV argument on the ESIZE
command should be set before extruding the meshed areas.
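Examples
--------
A minimal sketch: create a unit-square area and offset it normal to
its plane to form a volume.
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 1, 0, 0)
>>> k2 = mapdl.k("", 1, 1, 0)
>>> k3 = mapdl.k("", 0, 1, 0)
>>> a0 = mapdl.a(k0, k1, k2, k3)
>>> mapdl.voffst(a0, 1)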
"""
command = f"VOFFST,{narea},{dist},{kinc}"
return self.run(command, **kwargs)
def vplot(self, nv1="", nv2="", ninc="", degen="", scale="", **kwargs):
"""Displays the selected volumes.
APDL Command: VPLOT
Parameters
----------
nv1, nv2, ninc
Display volumes from NV1 to NV2 (defaults to NV1) in steps of NINC
(defaults to 1). If NV1 = ALL (default), NV2 and NINC are ignored
and all selected volumes [VSEL] are displayed.
degen
Degeneracy marker:
(blank) - No degeneracy marker is used (default).
DEGE - A red star is placed on keypoints at degeneracies (see the Modeling and Meshing
Guide). Not available if /FACET,WIRE is set.
scale
Scale factor for the size of the degeneracy-marker star. The scale
is the size in window space (-1 to 1 in both directions) (defaults
to .075).
Notes
-----
Displays selected volumes. (Only volumes having areas within the
selected area set [ASEL] will be plotted.) With PowerGraphics on
[/GRAPHICS,POWER], VPLOT will display only the currently selected
areas. This command is also a utility command, valid anywhere. The
degree of tessellation used to plot the volumes is set through the
/FACET command.
"""
command = f"VPLOT,{nv1},{nv2},{ninc},{degen},{scale}"
return self.run(command, **kwargs)
def vrotat(self, na1="", na2="", na3="", na4="", na5="",
na6="", pax1="", pax2="", arc="", nseg="",
**kwargs) -> str:
"""Generate cylindrical volumes by rotating an area pattern about
APDL Command: VROTAT
an axis.
Parameters
----------
na1, na2, na3, . . . , na6
List of areas in the pattern to be rotated (6 maximum if
using keyboard entry). Areas must lie to one side of, and
in the plane of, the axis of rotation. If NA1 = ALL,
all selected areas will define the pattern to be rotated.
A component name may also be substituted for NA1.
pax1, pax2
Keypoints defining the axis about which the area pattern
is to be rotated.
arc
Arc length (in degrees). Positive follows right-hand rule
about PAX1-PAX2 vector. Defaults to 360.
nseg
Number of volumes (8 maximum) around circumference.
Defaults to minimum required for 90 degrees (maximum) arcs, i.e.,
4 for 360 degrees, 3 for 270 degrees, etc.
Returns
-------
str
MAPDL command output.
Examples
--------
Rotate a circle about the Z axis to create a hoop.
First, create a circle offset from origin on the XZ plane.
>>> hoop_radius = 10
>>> hoop_thickness = 0.5
>>> k0 = mapdl.k("", hoop_radius, 0, 0)
>>> k1 = mapdl.k("", hoop_radius, 1, 0)
>>> k2 = mapdl.k("", hoop_radius, 0, hoop_thickness)
>>> carc0 = mapdl.circle(k0, 1, k1)
>>> a0 = mapdl.al(*carc0)
Create a hoop by rotating it about an axis defined by two
keypoints.
>>> k_axis0 = mapdl.k("", 0, 0, 0)
>>> k_axis1 = mapdl.k("", 0, 0, 1)
>>> mapdl.vrotat(a0, pax1=k_axis0, pax2=k_axis1)
Notes
-----
Generates cylindrical volumes (and their corresponding
keypoints, lines, and areas) by rotating an area pattern (and
its associated line and keypoint patterns) about an axis.
Keypoint patterns are generated at regular angular locations
(based on a maximum spacing of 90 degrees). Line patterns are
generated at the keypoint patterns. Arc lines are also
generated to connect the keypoints circumferentially.
Keypoint, line, area, and volume numbers are automatically
assigned (beginning with the lowest available values).
Adjacent lines use a common keypoint, adjacent areas use a
common line, and adjacent volumes use a common area.
To generate a single volume with an arc greater than 180 degrees,
NSEG must be greater than or equal to 2.
If element attributes have been associated with the input area
via the AATT command, the opposite area generated by the
VROTAT operation will also have those attributes (i.e., the
element attributes from the input area are copied to the
opposite area). Note that only the area opposite the input
area will have the same attributes as the input area; the
areas adjacent to the input area will not.
If the given areas are meshed or belong to meshed volumes, the
2-D mesh can be rotated (extruded) to a 3-D mesh. See the
Modeling and Meshing Guide for more information. Note that
the NDIV argument on the ESIZE command should be set before
extruding the meshed areas.
"""
command = f"VROTAT,{na1},{na2},{na3},{na4},{na5},{na6},{pax1},{pax2},{arc},{nseg}"
return self.run(command, **kwargs)
def vsum(self, lab="", **kwargs):
"""Calculates and prints geometry statistics of the selected volumes.
APDL Command: VSUM
Parameters
----------
lab
Controls the degree of tessellation used in the calculation of area
properties. If LAB = DEFAULT, area calculations will use the
degree of tessellation set through the /FACET command. If LAB =
FINE, area calculations are based on a finer tessellation.
Notes
-----
Calculates and prints geometry statistics (volume, centroid location,
moments of inertia, etc.) associated with the selected volumes.
Geometry items are reported in the global Cartesian coordinate system.
A unit density is assumed unless the volumes have a material
association via the VATT command. Items calculated by VSUM and later
retrieved by a *GET or *VGET command are valid only if the model is not
modified after the VSUM command is issued.
Setting a finer degree of tessellation will provide area calculations
with greater accuracy, especially for thin, hollow models. However,
using a finer degree of tessellation requires longer processing.
For very thin volumes, such that the ratio of the minimum to the
maximum dimension is less than 0.01, the VSUM command can provide
erroneous volume information. To ensure that such calculations are
accurate, make certain that you subdivide such volumes so that the
ratio of the minimum to the maximum is at least 0.05.
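Examples
--------
A minimal sketch: print the geometry statistics of a unit block.
>>> mapdl.blc4(0, 0, 1, 1, depth=1)
>>> print(mapdl.vsum())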
"""
command = f"VSUM,{lab}"
return self.run(command, **kwargs)
def vsymm(self, ncomp="", nv1="", nv2="", ninc="", kinc="", noelem="",
imove="", **kwargs) -> str:
"""Generate volumes from a volume pattern by symmetry reflection.
APDL Command: VSYMM
Parameters
----------
ncomp
Symmetry key:
- ``'X'`` : X symmetry (default).
- ``'Y'`` : Y symmetry.
- ``'Z'`` : Z symmetry.
nv1, nv2, ninc
Reflect volumes from pattern beginning with NV1 to NV2
(defaults to NV1) in steps of NINC (defaults to 1). If
NV1 = ALL, NV2 and NINC are ignored and the pattern is all
selected volumes [VSEL]. A component name may also be
substituted for NV1 (NV2 and NINC are ignored).
kinc
Keypoint increment between sets. If zero, the lowest
available keypoint numbers are assigned [NUMSTR].
noelem
Specifies whether nodes and elements are also to be generated:
0 - Generate nodes and elements associated with the
original volumes, if they exist.
1 - Do not generate nodes and elements.
imove
Specifies whether volumes will be moved or newly defined:
0 - Generate additional volumes.
1 - Move original volumes to new position retaining the
same keypoint numbers (KINC and NOELEM are ignored).
Corresponding meshed items are also moved if not
needed at their original position.
Returns
-------
str
MAPDL command output.
Examples
--------
Create four blocks by reflecting a single block across the X
component and then the Y component.
>>> vnum = mapdl.blc4(1, 1, 1, 1, depth=1)
>>> mapdl.vsymm('X', vnum)
>>> output = mapdl.vsymm('Y', 'ALL')
>>> print(output)
SYMMETRY TRANSFORMATION OF VOLUMES USING COMPONENT Y
SET IS ALL SELECTED VOLUMES
Notes
-----
Generates a reflected set of volumes (and their corresponding
keypoints, lines, areas and mesh) from a given volume pattern
by a symmetry reflection (see analogous node symmetry command,
NSYM). The MAT, TYPE, REAL, and ESYS attributes are based
upon the volumes in the pattern and not upon the current
settings. Reflection is done in the active coordinate system
by changing a particular coordinate sign. The active
coordinate system must be a Cartesian system. Volumes in the
pattern may have been generated in any coordinate system.
However, solid modeling in a toroidal coordinate system is not
recommended. Volumes are generated as described in the VGEN
command.
See the ESYM command for additional information about symmetry
elements.
"""
command = f"VSYMM,{ncomp},{nv1},{nv2},{ninc},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
def vtran(self, kcnto="", nv1="", nv2="", ninc="", kinc="", noelem="",
imove="", **kwargs):
"""Transfers a pattern of volumes to another coordinate system.
APDL Command: VTRAN
Parameters
----------
kcnto
Reference number of coordinate system where the pattern is to be
transferred. Transfer occurs from the active coordinate system.
The coordinate system type and parameters of KCNTO must be the same
as the active system.
nv1, nv2, ninc
Transfer volumes from pattern beginning with NV1 to NV2 (defaults
to NV1) in steps of NINC (defaults to 1). If NV1 = ALL, NV2 and
NINC are ignored and the pattern is all selected volumes [VSEL].
If NV1 = P, graphical picking is enabled and all remaining command
fields are ignored (valid only in the GUI). A component name may
also be substituted for NV1 (NV2 and NINC are ignored).
kinc
Keypoint increment between sets. If zero, the lowest available
keypoint numbers are assigned [NUMSTR].
noelem
Specifies whether elements and nodes are also to be generated:
0 - Generate nodes and elements associated with the original volumes, if they
exist.
1 - Do not generate nodes and elements.
imove
Specifies whether to redefine the existing volumes:
0 - Generate additional volumes.
1 - Move original volumes to new position retaining the same keypoint numbers (KINC
and NOELEM are ignored). Corresponding meshed items are also
moved if not needed at their original position.
Notes
-----
Transfers a pattern of volumes (and their corresponding keypoints,
lines, areas and mesh) from one coordinate system to another (see
analogous node transfer command, TRANSFER). The MAT, TYPE, REAL, and
ESYS attributes are based upon the volumes in the pattern and not upon
the current settings. Coordinate systems may be translated and rotated
relative to each other. Initial pattern may be generated in any
coordinate system. However, solid modeling in a toroidal coordinate
system is not recommended. Coordinate and slope values are interpreted
in the active coordinate system and are transferred directly. Volumes
are generated as described in the VGEN command.
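Examples
--------
A minimal sketch: define a Cartesian system offset 2 units along X,
reactivate the global system, and copy all selected volumes into the
new system.
>>> vnum = mapdl.blc4(0, 0, 1, 1, depth=1)
>>> mapdl.local(11, 0, 2, 0, 0)  # Cartesian system at X = 2
>>> mapdl.csys(0)                # transfer occurs from the active system
>>> mapdl.vtran(11, 'ALL')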
"""
command = f"VTRAN,{kcnto},{nv1},{nv2},{ninc},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
```
#### File: _commands/solution/multi_field_solver_definition_commands.py
```python
class MultiFieldSolverDefinitionCommands:
def mfcmmand(self, fnumb="", fname="", ext="", **kwargs):
"""Captures field solution options in a command file.
APDL Command: MFCMMAND
Parameters
----------
fnumb
Field number specified by the MFELEM command.
fname
Command file name specified for the field number. Defaults to field
"FNUMB".
ext
Extension for Fname. Defaults to .cmd.
Notes
-----
All relevant solution option commands for the specified field are
written to a file with the extension .cmd. Refer to the commands in the
following tables in the Command Reference: Analysis Options, Nonlinear
Options, Dynamic Options, and Load Step Options.
This command is also valid in PREP7.
See Multi-field Commands in the Coupled-Field Analysis Guide for a list
of all ANSYS Multi-field solver commands and their availability for MFS
and MFX analyses.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = f"MFCMMAND,{fnumb},{fname},{ext}"
return self.run(command, **kwargs)
def mfelem(self, fnumb="", itype1="", itype2="", itype3="", itype4="",
itype5="", itype6="", itype7="", itype8="", itype9="",
itype10="", **kwargs):
"""Defines a field by grouping element types.
APDL Command: MFELEM
Parameters
----------
fnumb
Field number for a group of element types.
itype1, itype2, itype3, . . . , itype10
Element types defined by the ET command.
Notes
-----
You can define up to ten element types per field.
Define only element types that contain elements in the field. Do not
include MESH200 because it is a "mesh-only" element that does not
contribute to the solution.
This command is also valid in PREP7.
See Multi-field Commands in the Coupled-Field Analysis Guide for a list
of all ANSYS Multi-field solver commands and their availability for MFS
and MFX analyses.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
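Examples
--------
A minimal sketch: group element types 1 and 2 into field number 1.
>>> mapdl.mfelem(1, 1, 2)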
"""
command = f"MFELEM,{fnumb},{itype1},{itype2},{itype3},{itype4},{itype5},{itype6},{itype7},{itype8},{itype9},{itype10}"
return self.run(command, **kwargs)
def mfem(self, fnumb="", itype1="", itype2="", itype3="", itype4="",
itype5="", itype6="", itype7="", itype8="", itype9="", itype10="",
**kwargs):
"""Add more element types to a previously defined field number.
APDL Command: MFEM
Parameters
----------
fnumb
Existing field number defined by the MFELEM command.
itype1, itype2, itype3, . . . , itype10
Element types defined by the ET command.
Notes
-----
You can add up to ten element types per MFEM command. This command
should not be used after an initial solution.
This command is also valid in PREP7.
See Multi-field Commands in the Coupled-Field Analysis Guide for a list
of all ANSYS Multi-field solver commands and their availability for MFS
and MFX analyses.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = f"MFEM,{fnumb},{itype1},{itype2},{itype3},{itype4},{itype5},{itype6},{itype7},{itype8},{itype9},{itype10}"
return self.run(command, **kwargs)
def mfexter(self, fnumb1="", fnumb2="", fnumb3="", fnumb4="", fnumb5="",
fnumb6="", fnumb7="", fnumb8="", fnumb9="", fnumb10="",
fnumb11="", fnumb12="", fnumb13="", fnumb14="", fnumb15="",
fnumb16="", fnumb17="", fnumb18="", fnumb19="", fnumb20="",
**kwargs):
"""Defines external fields for an ANSYS Multi-field solver analysis.
APDL Command: MFEXTER
Parameters
----------
fnumb1, fnumb2, fnumb3, . . . , fnumb20
External field numbers defined by the MFELEM command.
Notes
-----
This command specifies external field numbers to be used for load
transfer in an ANSYS Multi-field solver analysis. Use the MFIMPORT
command to import the external fields.
Use the MFELEM command to specify external field numbers. Use the
MFORDER command to specify the solution order for the external fields.
You can define a maximum of 20 fields.
This command is also valid in PREP7.
See Multi-field Commands in the Coupled-Field Analysis Guide for a list
of all ANSYS Multi-field solver commands and their availability for MFS
and MFX analyses.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = f"MFEXTER,{fnumb1},{fnumb2},{fnumb3},{fnumb4},{fnumb5},{fnumb6},{fnumb7},{fnumb8},{fnumb9},{fnumb10},{fnumb11},{fnumb12},{fnumb13},{fnumb14},{fnumb15},{fnumb16},{fnumb17},{fnumb18},{fnumb19},{fnumb20}"
return self.run(command, **kwargs)
def mffname(self, fnumb="", fname="", **kwargs):
"""Specifies a file name for a field in an ANSYS Multi-field solver
APDL Command: MFFNAME
analysis.
Parameters
----------
fnumb
Field number specified by the MFELEM command.
fname
File name. Defaults to field "FNUMB".
Notes
-----
All files created for the field will have this file name with the
appropriate extensions.
This command is also valid in PREP7.
See Multi-field Commands in the Coupled-Field Analysis Guide for a list
of all ANSYS Multi-field solver commands and their availability for MFS
and MFX analyses.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = f"MFFNAME,{fnumb},{fname}"
return self.run(command, **kwargs)
```
#### File: _commands/solution/solid_surface_loads.py
```python
class SolidSurfaceLoads:
def sfa(self, area="", lkey="", lab="", value="", value2="", **kwargs):
"""Specifies surface loads on the selected areas.
APDL Command: SFA
Parameters
----------
area
Area to which surface load applies. If ALL, apply load to all
selected areas [ASEL]. A component may be substituted for Area.
lkey
Load key associated with surface load (defaults to 1). Load keys
(1,2,3, etc.) are listed under "Surface Loads" in the input data
table for each element type in the Element Reference. LKEY is
ignored if the area is the face of a volume region meshed with
volume elements.
lab
Valid surface load label. Load labels are listed under "Surface
Loads" in the input table for each area type in the Element
Reference.
value
Surface load value or table name reference for specifying tabular
boundary conditions.
value2
Second surface load value (if any).
Notes
-----
Surface loads may be transferred from areas to elements with the SFTRAN
or SBCTRAN commands. See the SFGRAD command for an alternate tapered
load capability.
Tabular boundary conditions (VALUE = %tabname% and/or VALUE2 =
%tabname%) are available for the following surface load labels (Lab)
only: PRES (real and/or imaginary components), CONV (film coefficient
and/or bulk temperature) or HFLUX, and RAD (surface emissivity and
ambient temperature). Use the *DIM command to define a table.
This command is also valid in PREP7.
Examples
--------
Select areas with coordinates in the range ``0.4 < Y < 1.0``
>>> mapdl.asel('S', 'LOC', 'Y', 0.4, 1.0)
Set pressure to 250e3 on all areas.
>>> mapdl.sfa('ALL', '', 'PRES', 250e3)
"""
command = f"SFA,{area},{lkey},{lab},{value},{value2}"
return self.run(command, **kwargs)
def sfadele(self, area="", lkey="", lab="", **kwargs):
"""Deletes surface loads from areas.
APDL Command: SFADELE
Parameters
----------
area
Area to which surface load deletion applies. If ALL, delete load
from all selected areas [ASEL]. A component name may be substituted for AREA.
lkey
Load key associated with surface load (defaults to 1). See the SFA
command for details.
lab
Valid surface load label. If ALL, use all appropriate labels. See
the SFA command for labels.
Notes
-----
Deletes surface loads (and all corresponding finite element loads) from
selected areas.
This command is also valid in PREP7.
Examples
--------
Delete all convections applied to all areas where ``-1 < X < -0.5``
>>> mapdl.asel('S', 'LOC', 'X', -1, -0.5)
>>> mapdl.sfadele('ALL', lab='CONV')
"""
command = f"SFADELE,{area},{lkey},{lab}"
return self.run(command, **kwargs)
def sfalist(self, area="", lab="", **kwargs):
"""Lists the surface loads for the specified area.
APDL Command: SFALIST
Parameters
----------
area
Area at which surface load is to be listed. If ALL (or blank),
list for all selected areas [ASEL]. If AREA = P, graphical picking
is enabled and all remaining command fields are ignored (valid only
in the GUI). A component name may be substituted for AREA.
lab
Valid surface load label. If ALL (or blank), use all appropriate
labels. See the SFA command for labels.
Notes
-----
This command is valid in any processor.
"""
command = f"SFALIST,{area},{lab}"
return self.run(command, **kwargs)
def sfl(self, line="", lab="", vali="", valj="", val2i="", val2j="",
**kwargs):
"""Specifies surface loads on lines of an area.
APDL Command: SFL
Parameters
----------
line
Line to which surface load applies. If ALL, apply load to all
selected lines [LSEL]. If Line = P, graphical picking is enabled
and all remaining command fields are ignored (valid only in the
GUI). A component name may be substituted for Line.
lab
Valid surface load label. Load labels are listed under "Surface
Loads" in the input table for each element type in the Element
Reference.
vali, valj
Surface load values at the first keypoint (VALI) and at the second
keypoint (VALJ) of the line, or table name for specifying tabular
boundary conditions. If VALJ is blank, it defaults to VALI. If
VALJ is zero, a zero is used. If Lab = CONV, VALI and VALJ are the
film coefficients and VAL2I and VAL2J are the bulk temperatures.
To specify a table, enclose the table name in percent signs (%),
e.g., %tabname%. Use the *DIM command to define a table. If Lab =
CONV and VALI = -N, the film coefficient may be a function of
temperature and is determined from the HF property table for
material N [MP]. If Lab = RAD, VALI and VALJ values are surface
emissivities and VAL2I and VAL2J are ambient temperatures. The
temperature used to evaluate the film coefficient is usually the
average between the bulk and wall temperatures, but may be user
defined for some elements. If Lab = RDSF, VALI is the emissivity
        value; the following condition applies: If VALI = -N, the emissivity
may be a function of the temperature and is determined from the
EMISS property table for material N [MP]. If Lab = FSIN in a Multi-
field solver (single or multiple code coupling) analysis, VALI is
the surface interface number. If Lab = FSIN in a unidirectional
ANSYS to CFX analysis, VALJ is the surface interface number (not
available from within the GUI) and VALI is not used unless the
ANSYS analysis is performed using the Multi-field solver.
val2i, val2j
Second surface load values (if any). If Lab = CONV, VAL2I and
VAL2J are the bulk temperatures. If Lab = RAD, VAL2I and VAL2J are
the ambient temperatures. If Lab = RDSF, VAL2I is the enclosure
number. Radiation will occur between surfaces flagged with the same
enclosure numbers. If the enclosure is open, radiation will occur
to the ambient. VAL2I and VAL2J are not used for other surface load
labels. If VAL2J is blank, it defaults to VAL2I. If VAL2J is
zero, a zero is used. To specify a table (Lab = CONV), enclose the
table name in percent signs (%), e.g., %tabname%. Use the *DIM
command to define a table.
Notes
-----
Specifies surface loads on the selected lines of area regions. The
lines represent either the edges of area elements or axisymmetric shell
elements themselves. Surface loads may be transferred from lines to
elements with the SFTRAN or SBCTRAN commands. See the SFE command for
a description of surface loads. Loads input on this command may be
tapered. See the SFGRAD command for an alternate tapered load
capability.
You can specify a table name only when using structural (PRES) and
thermal (CONV [film coefficient and/or bulk temperature], HFLUX), and
surface emissivity and ambient temperature (RAD) surface load labels.
VALJ and VAL2J are ignored for tabular boundary conditions.
This command is also valid in PREP7.
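    Examples
    --------
    Apply a tapered pressure load on line 1, varying from 500 at the first
    keypoint to 1000 at the second keypoint (a minimal sketch; assumes
    line 1 exists in the current model):
    >>> mapdl.sfl(1, 'PRES', 500, 1000)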
"""
command = f"SFL,{line},{lab},{vali},{valj},{val2i},{val2j}"
return self.run(command, **kwargs)
def sfldele(self, line="", lab="", **kwargs):
"""Deletes surface loads from lines.
APDL Command: SFLDELE
Parameters
----------
line
Line to which surface load deletion applies. If ALL, delete load
from all selected lines [LSEL]. If LINE = P, graphical picking is
enabled and all remaining command fields are ignored (valid only in
the GUI). A component name may be substituted for LINE.
lab
Valid surface load label. If ALL, use all appropriate labels. See
the SFL command for labels.
Notes
-----
Deletes surface loads (and all corresponding finite element loads) from
selected lines.
This command is also valid in PREP7.
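    Examples
    --------
    Delete all surface loads from all selected lines (a minimal sketch):
    >>> mapdl.sfldele('ALL', 'ALL')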
"""
command = f"SFLDELE,{line},{lab}"
return self.run(command, **kwargs)
def sfllist(self, line="", lab="", **kwargs):
"""Lists the surface loads for lines.
APDL Command: SFLLIST
Parameters
----------
line
Line at which surface load is to be listed. If ALL (or blank),
list for all selected lines [LSEL]. If LINE = P, graphical picking
is enabled and all remaining command fields are ignored (valid only
in the GUI). A component name may be substituted for LINE.
lab
Valid surface load label. If ALL (or blank), use all appropriate
labels. See the SFL command for labels.
Notes
-----
Lists the surface loads for the specified line.
This command is valid in any processor.
"""
command = f"SFLLIST,{line},{lab}"
return self.run(command, **kwargs)
def sftran(self, **kwargs):
"""Transfer the solid model surface loads to the finite element model.
APDL Command: SFTRAN
Notes
-----
Surface loads are transferred only from selected lines and areas to all
selected elements. The SFTRAN operation is also done if the SBCTRAN
command is issued or automatically done upon initiation of the solution
calculations [SOLVE].
This command is also valid in PREP7.
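    Examples
    --------
    Transfer the selected solid model surface loads (takes no arguments):
    >>> mapdl.sftran()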
"""
command = f"SFTRAN,"
return self.run(command, **kwargs)
```
#### File: tests/test_inline_functions/test_normals_queries.py
```python
class TestNormalsNodeQueries:
@staticmethod
def build_plane(mapdl, plane: str):
n1 = mapdl.n(1, 0, 0, 0)
if plane == 'xy':
n2 = mapdl.n(2, 0, 1, 0)
n3 = mapdl.n(3, 1, 1, 0)
elif plane == 'xz':
n2 = mapdl.n(2, 1, 0, 0)
n3 = mapdl.n(3, 1, 0, 1)
elif plane == 'yz':
n2 = mapdl.n(2, 0, 1, 0)
n3 = mapdl.n(3, 0, 1, 1)
return n1, n2, n3
def test_normnx(self, query):
nodes = self.build_plane(query._mapdl, 'yz')
cosine = query.normnx(*nodes)
assert abs(cosine) == 1.
def test_normny(self, query):
nodes = self.build_plane(query._mapdl, 'xz')
cosine = query.normny(*nodes)
assert abs(cosine) == 1.
def test_normnz(self, query):
nodes = self.build_plane(query._mapdl, 'xy')
cosine = query.normnz(*nodes)
assert abs(cosine) == 1.
class TestNormalsKeypointsQueries:
@staticmethod
def build_plane(mapdl, plane: str):
k1 = mapdl.k(1, 0, 0, 0)
if plane == 'xy':
k2 = mapdl.k(2, 0, 1, 0)
k3 = mapdl.k(3, 1, 1, 0)
elif plane == 'xz':
k2 = mapdl.k(2, 1, 0, 0)
k3 = mapdl.k(3, 1, 0, 1)
elif plane == 'yz':
k2 = mapdl.k(2, 0, 1, 0)
k3 = mapdl.k(3, 0, 1, 1)
return k1, k2, k3
def test_normkx(self, query):
keypoints = self.build_plane(query._mapdl, 'yz')
cosine = query.normkx(*keypoints)
assert abs(cosine) == 1.
def test_normky(self, query):
keypoints = self.build_plane(query._mapdl, 'xz')
cosine = query.normky(*keypoints)
assert abs(cosine) == 1.
def test_normkz(self, query):
keypoints = self.build_plane(query._mapdl, 'xy')
cosine = query.normkz(*keypoints)
assert abs(cosine) == 1.
```
#### File: tests/test_inline_functions/test_selection_queries.py
```python
from ansys.mapdl.core.inline_functions import SelectionStatus
import pytest
class TestSelectionStatus:
@pytest.mark.parametrize('value', [1, -1, 0, 1., -1., 0.])
def test_happy(self, value):
select = SelectionStatus(value)
assert select == value
assert select is not value
@pytest.mark.parametrize('value', [1.5, 999, 99., '1'])
def test_unhappy(self, value):
with pytest.raises(ValueError):
SelectionStatus(value)
class TestNSEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.nsel('S', 'LOC', 'X', 0)
node = q.node(0, 0, 0)
select = q.nsel(node)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
node = q.node(0, 0, 0)
q._mapdl.nsel('NONE')
select = q.nsel(node)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.nsel(999)
assert select == 0
class TestKSEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.ksel('S', 'LOC', 'X', 0)
node = q.kp(0, 0, 0)
select = q.ksel(node)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
node = q.kp(0, 0, 0)
q._mapdl.ksel('NONE')
select = q.ksel(node)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.ksel(999)
assert select == 0
class TestLSEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.lsel('all')
# there are 6 lines numbered 1-6
for line in range(1, 7, 1):
select = q.lsel(line)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.lsel('NONE')
for line in range(1, 7, 1):
select = q.lsel(line)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.lsel(999)
assert select == 0
class TestASEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.asel('all')
# there are 4 areas numbered 1-4
for area in range(1, 5, 1):
select = q.asel(area)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.asel('NONE')
for area in range(1, 5, 1):
select = q.asel(area)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.asel(999)
assert select == 0
class TestESEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.esel('all')
# there are at least 4 elements numbered 1-4
for element in range(1, 5, 1):
select = q.esel(element)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.esel('NONE')
for element in range(1, 5, 1):
select = q.esel(element)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.esel(999)
assert select == 0
class TestVSEL:
def test_selected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.vsel('all')
select = q.vsel(1)
assert select == 1
def test_unselected(self, selection_test_geometry):
q = selection_test_geometry
q._mapdl.vsel('NONE')
select = q.vsel(1)
assert select == -1
def test_undefined(self, selection_test_geometry):
q = selection_test_geometry
select = q.vsel(999)
assert select == 0
class TestNDNEXT:
def test_existing_nodes(self, selection_test_geometry,
common_functions_and_classes):
get_details_of_nodes, get_details_of_elements, _, _ = \
common_functions_and_classes
q = selection_test_geometry
nodes = get_details_of_nodes(q._mapdl)
next_ = q.ndnext(1)
assert next_ in nodes
def test_unselected_nodes(self, selection_test_geometry,
common_functions_and_classes):
get_details_of_nodes, get_details_of_elements, _, _ = \
common_functions_and_classes
q = selection_test_geometry
nodes = get_details_of_nodes(q._mapdl)
last_node = len(nodes)
next_ = q.ndnext(last_node)
assert next_ == 0
def test_non_existing_nodes(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.ndnext(999)
assert next_ == 0
class TestELNEXT:
def test_existing_elements(self, selection_test_geometry,
common_functions_and_classes):
get_details_of_nodes, get_details_of_elements, _, _ = \
common_functions_and_classes
q = selection_test_geometry
elements = get_details_of_elements(q._mapdl)
next_ = q.elnext(1)
assert next_ in elements
def test_unselected_elements(self, selection_test_geometry,
common_functions_and_classes):
get_details_of_nodes, get_details_of_elements, _, _ = \
common_functions_and_classes
q = selection_test_geometry
elements = get_details_of_elements(q._mapdl)
last_element = len(elements)
next_ = q.elnext(last_element)
assert next_ == 0
def test_non_existing_elements(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.elnext(999)
assert next_ == 0
class TestKPNEXT:
def test_existing_kps(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.kpnext(1)
assert next_ == 2
def test_unselected_kps(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.kpnext(4)
assert next_ == 0
def test_non_existing_kps(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.kpnext(999)
assert next_ == 0
class TestLSNEXT:
def test_existing_lines(self, selection_test_geometry):
        # there are 6 lines in the selection_test_geometry fixture
q = selection_test_geometry
next_ = q.lsnext(1)
assert next_ == 2
def test_unselected_lines(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.lsnext(6)
assert next_ == 0
def test_non_existing_lines(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.lsnext(999)
assert next_ == 0
class TestARNEXT:
def test_existing_areas(self, selection_test_geometry):
        # there are 4 areas in the selection_test_geometry fixture
q = selection_test_geometry
next_ = q.arnext(1)
assert next_ == 2
def test_unselected_areas(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.arnext(4)
assert next_ == 0
def test_non_existing_areas(self, selection_test_geometry):
q = selection_test_geometry
next_ = q.arnext(999)
assert next_ == 0
class TestVLNEXT:
@staticmethod
def make_volumes(mapdl):
mapdl.prep7()
point1 = mapdl.k(999, 0, 10, 0)
point2 = mapdl.k(99, 0, 0, 10)
kps = [mapdl.k(i + 1, i, 0, 0) for i in range(10)]
vols = [mapdl.v(i, i + 1, point1, point2) for i in kps[:-1]]
return vols
def test_existing_volumes(self, query):
q = query
_ = self.make_volumes(q._mapdl)
next_ = q.vlnext(1)
assert next_ == 2
def test_unselected_volumes(self, query):
q = query
vols = self.make_volumes(q._mapdl)
next_ = q.vlnext(len(vols))
assert next_ == 0
def test_non_existing_volumes(self, query):
next_ = query.vlnext(999)
assert next_ == 0
``` |
{
"source": "Journeyman1998/AlgoDS",
"score": 3
} |
#### File: AlgoDS/data_structures/LRU_cache.py
```python
class Node:
def __init__(self, val):
self.value = val
self.prev = None
self.next = None
class cache:
def __init__(self, capacity):
self.capacity = capacity
self.data = {}
self.list_node = {}
self.size = 0
self.head_LRU = Node(-1)
self.tail_LRU = Node(-1)
self.head_LRU.next = self.tail_LRU
self.tail_LRU.prev = self.head_LRU
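        # Sentinel nodes bracket the recency list: the node right after
        # head is the least recently used, the node right before tail is
        # the most recently used.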
def get(self, key):
if key not in self.data:
return -1
else:
self.access(key)
return self.data[key]
def put(self, key_pair: tuple):
key, value = key_pair
if key in self.data:
self.data[key] = value
self.access(key)
return
if self.size < self.capacity:
self.insert_new_key(key, value)
self.size += 1
else:
            # Evict the least recently used entry (the node right after head).
            to_remove = self.head_LRU.next
            self.head_LRU.next = to_remove.next
            to_remove.next.prev = self.head_LRU
del self.data[to_remove.value]
del self.list_node[to_remove.value]
self.insert_new_key(key, value)
def insert_new_key(self, key, value):
to_insert = Node(key)
self.data[key] = value
self.list_node[key] = to_insert
last_node = self.tail_LRU.prev
last_node.next = to_insert
to_insert.prev = last_node
to_insert.next = self.tail_LRU
self.tail_LRU.prev = to_insert
def access(self, key):
assert(key in self.data)
if self.tail_LRU.prev.value != key:
node = self.list_node[key]
prev = node.prev
next = node.next
prev.next = next
next.prev = prev
last_node = self.tail_LRU.prev
last_node.next = node
node.prev = last_node
node.next = self.tail_LRU
self.tail_LRU.prev = node
def print_cache(self):
curr = self.head_LRU.next
while curr != self.tail_LRU:
print(f'{curr.value}:{self.data[curr.value]} ', end=" ")
curr = curr.next
x = cache(2)
x.put((1,3))
x.put((2,4))
print(x.get(0)) #return -1
print(x.get(1)) #return 3
x.put((3, 6)) #will evict key 2
print(x.get(2)) #return -1
print(x.get(3)) #return 6
x.print_cache()
```
#### File: AlgoDS/data_structures/max_heap.py
```python
def heapifyUtil(arr, index):
if 2*index+1 >= len(arr):
return
lc = 2*index+1
rc = 2*index+2
indexLargest = index
largest = arr[index]
if arr[lc] > largest:
largest = arr[lc]
indexLargest = lc
if rc < len(arr) and arr[rc] > largest:
largest = arr[rc]
indexLargest = rc
if indexLargest != index:
arr[index], arr[indexLargest] = arr[indexLargest], arr[index]
heapifyUtil(arr, indexLargest)
def heapify(arr):
for i in range(len(arr)-1, -1, -1):
heapifyUtil(arr, i)
return arr
def heappop(arr):
toRet = arr[0]
arr[0], arr[-1] = arr[-1], arr[0]
arr.pop()
heapifyUtil(arr, 0)
return toRet
def heappush(arr, x):
arr.append(x)
child = len(arr) - 1
parent = (child-1)//2
    while parent >= 0 and arr[parent] < arr[child]:
arr[parent], arr[child] = arr[child], arr[parent]
child = parent
parent = (child-1)//2
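# Minimal usage sketch (illustrative) of the max-heap helpers above.
if __name__ == '__main__':
    data = heapify([3, 9, 2, 7, 5])
    heappush(data, 8)
    assert heappop(data) == 9
    assert heappop(data) == 8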
``` |
{
"source": "journeyOS/global_scripts",
"score": 2
} |
#### File: journeyOS/global_scripts/bs_network.py
```python
import optparse
import os
import sys
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from config.user_config import UserConfig
def parseargs():
usage = "usage: %prog [options] arg1 arg2"
parser = optparse.OptionParser(usage=usage)
    option = optparse.OptionGroup(parser, "login options")
    option.add_option("-u", "--user", dest="user", type="string",
                      help="user name", default="")
    option.add_option("-p", "--pwd", dest="pwd", type="string",
                      help="password", default="")
parser.add_option_group(option)
(options, args) = parser.parse_args()
return (options, args)
def main():
print(os.path.abspath(__file__))
(options, args) = parseargs()
user = options.user.strip()
pwd = options.pwd.strip()
if user == '' or pwd == '':
user, pwd = UserConfig.get_user()
host = "http://1.1.1.3"
endpoint = "/ac_portal/login.php"
url = ''.join([host, endpoint])
data = dict()
data['opr'] = 'pwdLogin'
data['rememberPwd'] = 1
data['userName'] = user
    data['pwd'] = pwd
r = requests.post(url, data)
print(r.content)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
# User interrupt the program with ctrl+c
exit()
```
#### File: global_scripts/gerrit/bot_gerrit.py
```python
import json
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from base.smart_log import smart_log
from gerrit.bot_patch import BotPatch
class BotGerrit(object):
def search_patch(self, cmd):
smart_log(cmd)
try:
process = os.popen(cmd)
outputs = process.readlines()
            del outputs[-1]  # remove the last element
patchs = list()
for output in outputs:
result = json.loads(output)
patchs.append(BotPatch(result))
process.close()
return patchs
        except Exception:
            pass
        return None
```
#### File: journeyOS/global_scripts/gerrit.py
```python
import os
import optparse
import subprocess
import re
def parseargs():
usage = "usage: %prog [options] arg1 arg2"
parser = optparse.OptionParser(usage=usage)
buildoptiongroup = optparse.OptionGroup(parser, "git push to gerrit options")
buildoptiongroup.add_option("-b", "--branch", dest="branch",
help="what remote branch want to push", default="master")
buildoptiongroup.add_option("-r", "--reviewer", dest="reviewer",
help="reivew email address", default="")
buildoptiongroup.add_option("-d", "--drafts", action="store_true", dest="drafts",
help="push to gerrit as drafts", default=False)
parser.add_option_group(buildoptiongroup)
(options, args) = parser.parse_args()
return (options, args)
def main():
print(os.path.abspath(__file__))
(options, args) = parseargs()
branch = options.branch.strip()
reviewer = options.reviewer.strip()
drafts = options.drafts
cmd = "git log -n 1"
ret, output = subprocess.getstatusoutput(cmd)
if ret != 0:
print("git cmd fail:\n %s" % (output))
elif "Change-Id" not in output:
print("No Change-Id in commit message!")
print("Get commit-msg by \"scp -p -P 29418 gerrit host:hooks/commit-msg .git/hooks/\".")
print("git commit --amend again.")
return 1
cmd = "git config --get user.name"
ret, output = subprocess.getstatusoutput(cmd)
if ret != 0:
print("")
elif not output:
print("No git user name, add your git email by \"git config --global user.name [your name]\".")
return 1
cmd = "git config --get user.email"
ret, output = subprocess.getstatusoutput(cmd)
if ret != 0:
print("")
elif not output:
print("No git user email, add your git email by \"git config --global user.email <EMAIL>\".")
return 1
cmd = "git remote -v"
ret, output = subprocess.getstatusoutput(cmd)
if ret != 0:
print("git cmd fail:\n %s" % (output))
else:
remote_L = output.splitlines()
if not remote_L:
print("No remote address")
print("git remote add origin ssh://xxx.yyy.zzz/abc/abc")
return 1
remote_L = [re.split("[\t ]", line)[1] for line in remote_L if "(push)" in line]
remote_L = list(set(remote_L))
remote = remote_L[0]
remote_L_len = len(remote_L)
if remote_L_len > 1:
for i in range(0, remote_L_len):
print("[%2s]" % i, remote_L[i])
            choice = input("which remote you want? please input [%s - %s] or exit? " % (0, remote_L_len - 1))
try:
index = int(choice)
except ValueError as e:
print("exit")
return 1
            if index >= remote_L_len or index < 0:
print("[%s] out of index" % (index))
return 1
remote = remote_L[index]
print("your choice remote [%s]" % (index))
if branch:
cmd = "git push %s HEAD:refs/for/%s" % (remote, branch)
if drafts:
cmd = "git push %s HEAD:refs/drafts/%s" % (remote, branch)
if reviewer:
reviewers = reviewer.split(',')
if len(reviewers) > 0:
for email in reviewers:
                    if "%r" in cmd:
cmd += ",r=%s" % (email)
else:
cmd += "%%r=%s" % (email)
else:
cmd += "%%r=%s" % (reviewer)
print("git push cmd: %s" % (cmd))
os.system(cmd)
else:
cmd = "git branch -a"
os.system(cmd)
return 1
return 0
if __name__ == "__main__":
main()
```
#### File: global_scripts/issues/bot_jira_track.py
```python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from config.jira_track_config import JiraTrackConfig
from config.jira_server_config import JiraServerConfig
from config.user_config import UserConfig
from im.we_chat import Bot
from issues.bot_jira import BotJira
review_message = "# [{}]({})\n" \
" \n" \
"> <font color=\"comment\">有问题需要处理</font>\n\n"
class BotJiraTrack(object):
def send_track(self, who):
jira_server = JiraServerConfig.get_configs()
local_bot_jira = BotJira(jira_server.service, jira_server.fields, jira_server.user, jira_server.pwd)
jql = JiraTrackConfig.get_configs().jira_track
bot_issues = local_bot_jira.search_jql(jql)
for bot_issue in bot_issues:
message = review_message.format(bot_issue.issue, bot_issue.link)
            bot = Bot(UserConfig.get_configs()[who])
bot.set_text(message, type='markdown').send()
# if __name__ == "__main__":
# jira_track = BotJiraTrack()
# jira_track.send_track("bot_owner")
``` |
{
"source": "journy-io/python-sdk",
"score": 2
} |
#### File: python-sdk/journyio/client.py
```python
import json
from collections import defaultdict
from datetime import datetime
from urllib import parse
from typing import List
from .events import Event
from .httpclient import HttpRequest, Method, HttpClient, HttpResponse, HttpHeaders
from .results import Failure, Success, ApiKeyDetails, TrackingSnippetResponse
from .utils import JournyException, status_code_to_api_error, assert_journy
from .user_identified import UserIdentified
from .account_identified import AccountIdentified
from .version import version
class Properties(dict):
def __init__(self):
super().__init__()
        self.properties = defaultdict(lambda: None)
def __getitem__(self, key: str):
assert_journy(isinstance(key, str), "The key is not a string.")
return self.properties.get(key.lower().strip())
def __setitem__(
self, key: str, value: str or List[str] or bool or int or datetime or None
):
assert_journy(isinstance(key, str), "The key is not a string.")
if (
isinstance(value, str)
or isinstance(value, int)
or isinstance(value, bool)
or isinstance(value, datetime)
or value is None
or (isinstance(value, list) and all([isinstance(el, str) for el in value]))
):
if isinstance(value, datetime):
value = str(value.isoformat())
self.properties.__setitem__(key.lower().strip(), value)
else:
raise JournyException(
"Value is not a string, number, boolean, datetime or None."
)
def union(self, other):
self.properties.update(other.properties)
return self
def __str__(self):
return json.dumps(self.properties)
def __repr__(self):
return self.__str__()
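# Usage sketch (illustrative): keys are lower-cased and stripped before
# storage, and datetime values are converted to ISO 8601 strings, e.g.
#   props = Properties()
#   props["Plan"] = "pro"                    # stored under key "plan"
#   props["signup"] = datetime(2020, 1, 1)   # stored as "2020-01-01T00:00:00"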
class Config(object):
"""
A config for the Journy.io's python journyio client.
This contains all the necessary information for the client to work properly.
"""
def __init__(self, api_key: str, root_url: str or None = "https://api.journy.io"):
if root_url is None:
root_url = "https://api.journy.io"
assert_journy(isinstance(api_key, str), "The api key is not a string.")
assert_journy(isinstance(root_url, str), "The root url is not a string.")
self.api_key = api_key
self.root_url = root_url
def __repr__(self):
return f"Config({self.api_key}, {self.root_url})"
def __str__(self):
return self.__repr__()
class Client(object):
"""
Journy.io's python journyio client.
"""
def __init__(self, httpclient: HttpClient, config: Config):
assert_journy(
isinstance(httpclient, HttpClient),
"The httpClient is not a HttpClient object.",
)
assert_journy(isinstance(config, Config), "The config is not a Config object.")
self.httpclient = httpclient
self.config = config
def __repr__(self):
return f"Client({self.httpclient}, {self.config})"
def __str__(self):
return self.__repr__()
def __create_url(self, path) -> str:
return self.config.root_url + path
def __get_headers(self) -> HttpHeaders:
headers = HttpHeaders()
headers["Content-Type"] = "application/json"
headers["User-Agent"] = f"python-sdk/{version}"
headers["x-api-key"] = self.config.api_key
return headers
@staticmethod
def __parse_calls_remaining(response: HttpResponse) -> int:
remaining = response.headers["X-RateLimit-Remaining"]
try:
return int(remaining)
except ValueError:
return None
def add_event(self, event: Event) -> Success[None] or Failure:
assert_journy(isinstance(event, Event), "The event should be an Event object.")
try:
body = {
"identification": {},
"name": event.name,
"metadata": event.metadata.metadata,
}
if event.date:
body["triggeredAt"] = event.date.isoformat()
if event.user:
body["identification"]["user"] = event.user.format_identification()
if event.account:
body["identification"][
"account"
] = event.account.format_identification()
request = HttpRequest(
self.__create_url("/events"),
Method.POST,
self.__get_headers(),
json.dumps(body),
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[None](
response.body["meta"]["requestId"], calls_remaining, None
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def upsert_user(
self, user: UserIdentified, properties: Properties
) -> Success[None] or Failure:
assert_journy(
isinstance(user, UserIdentified), "User is not a UserIdentified object."
)
assert_journy(
isinstance(properties, Properties), "Properties is not a Properties object."
)
try:
request = HttpRequest(
self.__create_url("/users/upsert"),
Method.POST,
self.__get_headers(),
json.dumps(
{
"identification": user.format_identification(),
"properties": properties.properties,
}
),
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[None](
response.body["meta"]["requestId"], calls_remaining, None
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def delete_user(self, user: UserIdentified) -> Success[None] or Failure:
assert_journy(
isinstance(user, UserIdentified), "User is not a UserIdentified object."
)
try:
request = HttpRequest(
self.__create_url("/users"),
Method.DELETE,
self.__get_headers(),
json.dumps({"identification": user.format_identification()}),
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[None](
response.body["meta"]["requestId"], calls_remaining, None
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def upsert_account(
self, account: AccountIdentified, properties: Properties or None
) -> Success[None] or Failure:
assert_journy(
isinstance(account, AccountIdentified),
"Account is not an AccountIdentified object.",
)
if properties is not None:
assert_journy(
isinstance(properties, Properties),
"Properties is not a Properties object.",
)
try:
request = HttpRequest(
self.__create_url("/accounts/upsert"),
Method.POST,
self.__get_headers(),
json.dumps(
{
"identification": account.format_identification(),
"properties": properties.properties,
}
),
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[None](
response.body["meta"]["requestId"], calls_remaining, None
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def delete_account(self, account: AccountIdentified) -> Success[None] or Failure:
assert_journy(
isinstance(account, AccountIdentified),
"Account is not an AccountIdentified object.",
)
try:
request = HttpRequest(
self.__create_url("/accounts"),
Method.DELETE,
self.__get_headers(),
json.dumps(
{
"identification": account.format_identification(),
}
),
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[None](
response.body["meta"]["requestId"], calls_remaining, None
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def add_users_to_account(
self, account: AccountIdentified, users: List[UserIdentified]
) -> Success[None] or Failure:
assert_journy(
isinstance(account, AccountIdentified),
"Account is not an AccountIdentified object.",
)
for user in users:
assert_journy(
isinstance(user, UserIdentified),
f"User {user} is not a UserIdentified object.",
)
try:
request = HttpRequest(
self.__create_url("/accounts/users/add"),
Method.POST,
self.__get_headers(),
json.dumps(
{
"account": account.format_identification(),
"users": [
{"identification": user.format_identification()}
for user in users
],
}
),
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[None](
response.body["meta"]["requestId"], calls_remaining, None
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def remove_users_from_account(
self, account: AccountIdentified, users: List[UserIdentified]
) -> Success[None] or Failure:
assert_journy(
isinstance(account, AccountIdentified),
"Account is not an AccountIdentified object.",
)
for user in users:
assert_journy(
isinstance(user, UserIdentified),
f"User {user} is not a UserIdentified object.",
)
try:
request = HttpRequest(
self.__create_url("/accounts/users/remove"),
Method.POST,
self.__get_headers(),
json.dumps(
{
"account": account.format_identification(),
"users": [
{"identification": user.format_identification()}
for user in users
],
}
),
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[None](
response.body["meta"]["requestId"], calls_remaining, None
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def link(self, user: UserIdentified, device_id: str) -> Success[None] or Failure:
assert_journy(
isinstance(user, UserIdentified), "The user is not a UserIdentified object."
)
assert_journy(isinstance(device_id, str), "The device id is not a string.")
try:
request = HttpRequest(
self.__create_url("/link"),
Method.POST,
self.__get_headers(),
json.dumps(
{
"deviceId": device_id,
"identification": user.format_identification(),
}
),
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[None](
response.body["meta"]["requestId"], calls_remaining, None
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def get_tracking_snippet(
self, domain: str
) -> Success[TrackingSnippetResponse] or Failure:
assert_journy(isinstance(domain, str), "domain should be a string.")
try:
request = HttpRequest(
self.__create_url(
"/tracking/snippet?domain={}".format(parse.quote_plus(domain))
),
Method.GET,
self.__get_headers(),
None,
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[TrackingSnippetResponse](
response.body["meta"]["requestId"],
calls_remaining,
TrackingSnippetResponse(
response.body["data"]["domain"], response.body["data"]["snippet"]
),
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
def get_api_key_details(self) -> Success[ApiKeyDetails] or Failure:
try:
request = HttpRequest(
self.__create_url("/validate"), Method.GET, self.__get_headers(), None
)
response = self.httpclient.send(request)
calls_remaining = Client.__parse_calls_remaining(response)
if not (200 <= response.status_code < 300):
return Failure(
response.body["meta"]["requestId"],
calls_remaining,
status_code_to_api_error(response.status_code),
)
return Success[ApiKeyDetails](
response.body["meta"]["requestId"],
calls_remaining,
ApiKeyDetails(response.body["data"]["permissions"]),
)
except JournyException as e:
raise e
except Exception:
            raise JournyException("An unknown error has occurred")
```
#### File: python-sdk/tests/test_utils.py
```python
import pytest
from journyio.utils import (
JournyException,
status_code_to_api_error,
assert_journy,
APIError,
)
def test_journy_exception():
with pytest.raises(JournyException):
raise JournyException("custom message")
try:
raise JournyException("custom message")
except JournyException as e:
assert e.__str__() == "JournyException(custom message)"
def test_status_code_to_api_error():
with pytest.raises(JournyException):
raise status_code_to_api_error("status_code")
assert status_code_to_api_error(200) is APIError.UnknownError
assert status_code_to_api_error(401) is APIError.UnauthorizedError
assert status_code_to_api_error(400) is APIError.BadArgumentsError
assert status_code_to_api_error(429) is APIError.TooManyRequests
assert status_code_to_api_error(404) is APIError.NotFoundError
assert status_code_to_api_error(500) is APIError.ServerError
def test_assert_journy():
with pytest.raises(JournyException) as exception:
assert_journy(isinstance(1, str), "Not a string")
assert exception.value.msg == "Not a string"
``` |
{
"source": "joushx/Online-Ticket-Code",
"score": 3
} |
#### File: Online-Ticket-Code/onlineticket/section.py
```python
from onlineticket.sections.head import HeadSectionParser
from onlineticket.sections.db_bl import DBBLSectionParser
class SectionParser:
def parse(self, section):
parser = self._find_parser(section.name)
if not parser:
return None
parsed = self._parse_header(section)
parsed['content'] = parser.parse(section)
return parsed
def _parse_header(self, section):
return {
'record_id': section.name,
'version': section.version,
'length': section.length
}
def _find_parser(self, name):
if name == 'U_HEAD':
return HeadSectionParser()
elif name == '0080BL':
return DBBLSectionParser()
```
#### File: onlineticket/sections/head.py
```python
from kaitaistruct import KaitaiStream, BytesIO
from onlineticket.generated.head import Head
class HeadSectionParser:
def parse(self, section):
parsed = Head(KaitaiStream(BytesIO(section.data)))
return {
'company': parsed.rics,
'ticket_id': parsed.ticket_id,
'edition_time': {
'year': parsed.edition_time.year,
'month': parsed.edition_time.month,
'day': parsed.edition_time.day,
'hour': parsed.edition_time.hour,
'minute': parsed.edition_time.minute
},
'flags': {
'international': parsed.flag.international,
'test': parsed.flag.test
},
'languages': [
parsed.lang,
parsed.lang2
]
}
``` |
{
"source": "joushx/python-sonicare",
"score": 3
} |
#### File: gui/sensor/model.py
```python
from enum import IntEnum
class SensorFrame:
def __init__(self, type, counter):
self.type = type
self.counter = counter
class GyroscopeFrame(SensorFrame):
def __init__(self, counter, acceleration1, acceleration2, acceleration3, gyro1, gyro2, gyro3):
super().__init__(SensorFrameType.GYROSCOPE, counter)
self.acceleration1 = acceleration1
self.acceleration2 = acceleration2
self.acceleration3 = acceleration3
self.gyro1 = gyro1
self.gyro2 = gyro2
self.gyro3 = gyro3
class TemperatureFrame(SensorFrame):
def __init__(self, counter, temperature):
super().__init__(SensorFrameType.TEMPERATURE, counter)
self.temperature = temperature
class PressureFrame(SensorFrame):
def __init__(self, counter, pressure, alarm):
super().__init__(SensorFrameType.PRESSURE, counter)
self.pressure = pressure
self.alarm = alarm
class SensorFrameType(IntEnum):
GYROSCOPE = 4
TEMPERATURE = 2
PRESSURE = 1
``` |
{
"source": "JousterL/beat-machine",
"score": 3
} |
#### File: beatmachine/effects/temporal.py
```python
import itertools
import random
from typing import Iterable, Generator, List, TypeVar
T = TypeVar("T")
import numpy as np
from beatmachine.effects.base import LoadableEffect, EffectABCMeta
class RandomizeAllBeats(LoadableEffect, metaclass=EffectABCMeta):
"""
Completely randomize the order of every single beat.
"""
__effect_name__ = "randomize"
__effect_schema__ = {}
def __call__(self, beats):
shuffled_beats = list(beats)
random.shuffle(shuffled_beats)
yield from shuffled_beats
def __eq__(self, other):
return isinstance(other, RandomizeAllBeats)
def _chunks(iterable: Iterable[T], size: int = 10) -> Generator[List[T], None, None]:
iterator = iter(iterable)
for first in iterator:
yield list(itertools.chain([first], itertools.islice(iterator, size - 1)))
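# e.g. list(_chunks(range(5), 2)) yields [[0, 1], [2, 3], [4]] (illustrative).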
class ReverseAllBeats(LoadableEffect, metaclass=EffectABCMeta):
"""
Reverses beat order, so the last beat of the song will play first, etc.
"""
__effect_name__ = "reverseb"
__effect_schema__ = {}
def __call__(self, beats):
beat_list = list(beats)
beat_list.reverse()
yield from beat_list
def __eq__(self, other):
return isinstance(other, ReverseAllBeats)
class SwapBeats(LoadableEffect, metaclass=EffectABCMeta):
"""
Swap two beats, indicated by X and Y, every set number of beats.
For example, X = 2 Y = 4, Group = 4 results in a "beats 2 and 4 are swapped" effect. Periods cannot be equal.
"""
__effect_name__ = "swap"
__effect_schema__ = {
"x_period": {
"type": "number",
"minimum": 1,
"default": 2,
"title": "X",
"description": "First beat to swap, starting at 1.",
},
"y_period": {
"type": "number",
"minimum": 1,
"default": 4,
"title": "Y",
"description": "Second beat to swap, starting at 1 and not equal to X.",
},
"group_size": {
"type": "number",
"minimum": 4,
"default": 4,
"title": "Group",
"description": "Beats per measure, or how many beats to wait before swapping again.",
},
"offset": {
"type": "number",
"minimum": 0,
"default": 0,
"title": "Offset",
"description": "How many beats to wait before the first swap.",
},
}
def __init__(
self,
*,
x_period: int = 2,
y_period: int = 4,
group_size: int = 4,
offset: int = 0,
):
if x_period < 1 or y_period < 1:
raise ValueError(
f"`swap` effect must have `x_period` and `y_period` both >= 1, "
f"but got {x_period} and {y_period} respectively"
)
if x_period == y_period:
raise ValueError(
f"`swap` effect must have unique `x_period` and `y_period` values, "
f"but both were {x_period}"
)
if offset < 0:
raise ValueError(f"Offset must be >= 0, but was {offset}")
self.low_period = (min(x_period, y_period) - 1) % group_size + 1
self.high_period = (max(x_period, y_period) - 1) % group_size + 1
self.group_size = group_size
self.offset = offset
def __call__(
self, beats: Iterable[np.ndarray]
) -> Generator[np.ndarray, None, None]:
beats = iter(beats)
for _ in range(self.offset):
yield next(beats)
for group in _chunks(beats, self.group_size):
if len(group) >= self.high_period:
# Swap low and high beats
(group[self.low_period - 1], group[self.high_period - 1]) = (
group[self.high_period - 1],
group[self.low_period - 1],
)
yield from group
def __eq__(self, other):
return (
isinstance(other, SwapBeats)
and self.low_period == other.low_period
and self.high_period == other.high_period
and self.group_size == other.group_size
and self.offset == other.offset
)
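# Illustrative: SwapBeats(x_period=2, y_period=4, group_size=4) swaps the
# 2nd and 4th beats of every 4-beat group in an iterable of beat arrays.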
class RemapBeats(LoadableEffect, metaclass=EffectABCMeta):
"""
An effect that remaps beats based on a list of target indices. For example, a remap effect with mapping [0, 3, 2, 1]
behaves identically to a swap effect with periods 2 and 4.
Most effects can be emulated through Remap.
"""
__effect_name__ = "remap"
__effect_schema__ = {
"mapping": {
"type": "array",
"items": {"type": "number"},
"title": "Mapping",
"description": "New order of beats, starting at 0. For example, the mapping [0, 3, 2, 1] swaps beats 2 and 4 every 4 beats. The mapping [0, 1, 1, 1] replaces beats 3 and 4 with beat 2.",
}
}
def __init__(self, *, mapping: List[int]):
if any(m < 0 or m >= len(mapping) for m in mapping):
raise ValueError(
f"values of `remap` effect with {len(mapping)} values must be within range "
f"[0, {len(mapping) - 1}], however the following values fall outside of it: "
f"{[m for m in mapping if m < 0 or m >= len(mapping)]}"
)
self.mapping = mapping
def __call__(
self, beats: Iterable[np.ndarray]
) -> Generator[np.ndarray, None, None]:
for group in _chunks(beats, len(self.mapping)):
group_size = len(group)
remapped_group = []
for beat_idx in self.mapping:
if beat_idx < group_size:
remapped_group.append(group[beat_idx])
yield from remapped_group
def __eq__(self, other):
return isinstance(other, RemapBeats) and self.mapping == other.mapping
``` |
{
"source": "Joustie/Mastering-GitLab-12",
"score": 3
} |
#### File: Mastering-GitLab-12/Chapter13/server.py
```python
from flask import Flask, request
import json
app = Flask(__name__)
def runsomething():
    print("This is triggered")

@app.route('/', methods=['POST'])
def trigger():
    data = json.loads(request.data)
    print("New commit by: {}".format(data['commits'][0]['author']['name']))
    print("Author email: {}".format(data['commits'][0]['author']['email']))
    print("Commit message: {}".format(data['commits'][0]['message']))
runsomething()
return "OK"
if __name__ == '__main__':
app.run()
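# Smoke test (hypothetical payload; assumes Flask's default port 5000):
# curl -X POST -H 'Content-Type: application/json' \
#      -d '{"commits":[{"author":{"name":"a","email":"e"},"message":"m"}]}' \
#      http://127.0.0.1:5000/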
``` |
{
"source": "jouterleys/auto-marker-label",
"score": 2
} |
#### File: jouterleys/auto-marker-label/markerLabelGUI.py
```python
import dash
from dash import Dash #to be installed
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_bootstrap_components as dbc #to be installed
import plotly.graph_objects as go #to be installed
from json import JSONEncoder
import mydcc #to be installed
import numpy as np
import os
import webbrowser
import logging
import glob
import time
import automarkerlabel as aml
path=os.path.dirname(os.path.abspath(__file__)) # get current path to use as default
# --------------------------------------------------------------------------- #
# --------------------------------- PARAMETERS ------------------------------ #
# --------------------------------------------------------------------------- #
# File paths
modelpath = os.path.join(path,'data','model_sim_add9_2020-09-15.ckpt')
trainvalpath = os.path.join(path,'data','trainingvals_sim_add9_2020-09-15.pickle')
markersetpath = os.path.join(path,'data','MarkerSet.xml')
# Other
gapfillsize = 24 # Size of gaps to fill with interpolated data when exporting
windowSize = 120 # size of windows used to segment data for algorithm
incr=1 # increment for GUI slider
# --------------------------------------------------------------------------- #
logging.getLogger('werkzeug').setLevel(logging.ERROR)
class NumpyArrayEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return JSONEncoder.default(self, obj)
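# Illustrative use (requires `import json`): serialize a dict containing
# numpy arrays, e.g. json.dumps({'pts': np.zeros((2, 3))}, cls=NumpyArrayEncoder)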
c3dlist = glob.glob(os.path.join(path,'*.c3d'))
# Read marker set
markers, segment, uniqueSegs, segID, _, num_mks = aml.import_markerSet(markersetpath)
external_stylesheets = [dbc.themes.BOOTSTRAP]
app = Dash(__name__, external_stylesheets=external_stylesheets,
title='Marker Labelling GUI', update_title='GUI UPDATING...')
# Define Layout
app.layout = html.Div(
[
mydcc.Relayout(id = "camera", aim = 'the_graph' ),
dbc.Row(dbc.Col(html.H1(children= "3D Visualization of Markers"))),
dbc.Col([dbc.Row([
dbc.Col(html.H2("MODIFY LABELS AND SPLIT TRAJECTORIES :")),
dbc.Col(html.H2("ERROR DETECTION :")),
dbc.Col(html.H2("IMPORT DATA FILE :"))]),
dbc.Row([
dbc.Col([
dbc.Row([
dbc.Col(
html.Div(dcc.Input(
id='marker_ind',
className='entry_button',
placeholder='Current marker #',
type='text',
value='',
style={'margin-top':'6px'}
)),width=4),
dbc.Col(
html.Div(dcc.Dropdown(
id='new_name',
className='dropdown',
placeholder='New name',
options=[{'label': j, 'value': j} for j in markers]
)), width=4),
dbc.Col(width=1),
dbc.Col(
html.Button(
id='submit-button',
className='submit_button',
type='submit',
children='Rename',
style={'margin-bottom':'5px'}), width=3)]),
dbc.Row([
dbc.Col(html.Div('Split marker trajectory:',
style={'text-align':'centre','width': '100%','margin-top': '2px'}),width=4),
dbc.Col(
html.Div(dcc.Input(
id='traj_split_frame',
className='entry_button',
placeholder='Frame # to split at',
type='text',
value='',
style={'margin-top':'6px'}
)),width=5),
# dbc.Col(width=1),
dbc.Col(
html.Button(
id='submit-split-button',
className='submit_button',
type='submit',
children='Split',
style={'margin-top':'5px'}), width={'size':3, 'offset':0})
]),
]),
dbc.Col(html.Div(id='errors',
className='errors'), width=4),
dbc.Col([
dbc.Row([
dbc.Col(html.Div("Click before loading new file:",
style={'text-align':'left','width': '100%','margin-bottom': '5px'}),
width=6,align='center'),
dbc.Col(html.A(html.Button('Refresh\nSettings',
id='refresh',
className='refresh'), href='/'),
width=6)
]),
dbc.Row([
dbc.Col(html.Div('Enter data folder:',
style={'text-align' : 'left','width': '80%',
'margin-bottom':'3px'}),
width=6,align='center'),
dbc.Col(html.Div(dcc.Input(
id='datapath',
className='datapath',
type='text',
placeholder=path,
debounce=True,
size=35,
persistence=True,
style={'width': '100%','margin-bottom':'5px'})),
width=6)
]),
dbc.Row([
dbc.Col(html.Div(
dcc.Dropdown(
id='upload_data',
className='dropdown2',
placeholder='Select File',
value=None,
options=[{'label': j.split(os.path.sep)[-1], 'value': j,
'title': j.split(os.path.sep)[-1]} for j in c3dlist])),
width=6,align='center'),
dbc.Col(html.Button(
id='load_data_button',
className='submit_button',
type='submit',
children='Load Data',
style={'width':'100%','border-width':'2px'}),
width=6)
])
],width=4)
])
]),
dbc.Col([dbc.Row([
dbc.Col(html.Div(id='comment'), width=4),
dbc.Col(html.Div(""),width=4),
dbc.Col(html.Div(id='outputdata', className='output_data'), width=4)
])
]),
dbc.Col([dbc.Row([
dbc.Col(html.H2("VISUALIZATION :")),
dbc.Col(html.H2("ROTATION ABOUT Z :")),
dbc.Col(html.H2("LABEL MARKERS:"))]),
dbc.Row([
dbc.Col(html.Div(dcc.Dropdown(
id='dropdown',
className='dropdown',
options=[{'label': 'Confidence', 'value': 'Confidence'},
{'label': 'Unlabelled','value': 'Unlabelled'},
{'label': 'Segments','value':'Segments'}],
value='Confidence'
),
style={"margin":"0px 0px 0px 0px"}), width = {'size':2, 'offset':1}),
dbc.Col(html.Div(dcc.Input(
id='rotation',
className='entry_button',
placeholder='Enter rotation angle',
type='text',
value=0
)), width={'size':2, 'offset':1}),
dbc.Col(html.Div(id='rot_comment'), width=1),
dbc.Col(html.Button(
id='submit-button-rot',
className='submit_button',
type='submit',
children='Submit',
style={'height':'50%'}
), width={'size':1, 'offset':0}),
dbc.Col([dbc.Row(html.Button(
id='load_button',
className='submit_button',
type='button',
children='Label Markers!',
style={
'margin':'0px 0px 0px 0px'
})),
dbc.Row(html.Div(id='label_comment'))], width={'size':2, 'offset':1})
]),
]),
dbc.Col([dbc.Row([
dbc.Col(html.H2("MARKER LABELS LIST :"), width=4),
dbc.Col(html.H2("3D PLOT :"), width=8)]),
dbc.Row([
dbc.Col(html.Div(id='labels_list_current', className='labels_list',
style={'margin': '10px 0px 0px 10px'}), width=4),
dbc.Col(dcc.Loading(
id="loading_graph",
children=[html.Div(dcc.Graph(id='the_graph',
style={'height':'50vh', 'margin-left':'20%'}))],
type="circle"), width=8),
]),
]),
dbc.Row(dbc.Col(html.Div(id='timeframe'),
style={'margin': '-10px 0px 0px 0px','position':'top'})),
dbc.Row([
dbc.Col(html.Div(dcc.Slider(
id="Time_Slider",
min=0,
value=1,
step=incr,
updatemode="drag"),
style={"position":"left"}), width=11),
dbc.Col(html.Button(
id='export_button',
className='export_button',
type='button',
children='EXPORT TO C3D'), width=1)]),
dbc.Row(dbc.Col(html.Div(id='export_comment')),style={'margin-top':'5px'}),
dbc.Row(dbc.Col(html.Div(id='pts_c3d'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='labels_c3d'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='labels_updated'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='confidence_c3d'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='body_segment'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='frame_rate'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='rotang'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='c3dlist'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='filename'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='rawlabels'), style={'display': 'none'})),
dbc.Row(dbc.Col(html.Div(id='pts_updated'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='pts_current'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='pts_rot'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='confidence_current'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='pts_split'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='labels_split'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='confidence_split'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='labels_current'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='split_timestamp'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='mod_timestamp'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='rot_timestamp'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='label_timestamp'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='message_mod'),style={'display':'none'})),
dbc.Row(dbc.Col(html.Div(id='message_split'),style={'display':'none'})),
]
)
#Update file list dropdown
@app.callback(Output('upload_data','options'),
[Input('datapath','value')])
def update_filelist(datapath):
print('updating file list')
if datapath is not None:
c3dlist = glob.glob(os.path.join(datapath,'*.c3d'))
else:
c3dlist = glob.glob(os.path.join(path,'*.c3d'))
return [{'label': j.split(os.path.sep)[-1], 'value': j} for j in c3dlist]
# Submit rotation angle
@app.callback([Output('rotang','children'), Output('rot_comment','children'),
Output('rot_timestamp', 'children'), Output('pts_rot', 'children')],
[Input('submit-button-rot','n_clicks')],
[State('rotation','value'), State('pts_current', 'children'), State('rotang', 'children')])
def submit_angle(n_clicks, angle, pts, rotang_old):
comment='Rotation : ' + str(angle)
print('rotating')
if n_clicks is not None:
if rotang_old is None:
rotang_old=0
pts=np.array(pts)
pts=pts.astype('float64')
timestamp=time.time()
rotang = float(angle) * np.pi/180
rotang_current=rotang_old+rotang
Ralign = np.array([[np.cos(rotang),-np.sin(rotang),0],
[np.sin(rotang),np.cos(rotang),0],
[0,0,1]])
for i in range(pts.shape[1]):
pts[:,i,:] = np.matmul(Ralign,pts[:,i,:].transpose()).transpose()
elif n_clicks is None:
raise PreventUpdate
return rotang_current, comment, timestamp, pts
# Load data button
@app.callback(Output('upload_data','value'),
[Input('load_data_button','n_clicks')],
[State('upload_data','value')])
def submit_filename(n_clicks,filename):
if n_clicks is not None:
return filename
else:
return dash.dash.no_update
#Load data
@app.callback(
[Output('outputdata','children'),Output('pts_c3d','children'),
Output('frame_rate', 'children'),Output('Time_Slider','max'),
Output('Time_Slider', 'marks'),Output('rawlabels','children')],
[Input('rotang', 'children'),Input('load_data_button','n_clicks')],
[State('upload_data','value')])
def load_data(angle,n_clicks,filename):
outputdata=''
pts=''
fs=''
max_slider=''
marks=''
labels = ''
if angle is None:
angle=0
if (filename is not None):
if os.path.exists(filename) and ('.c3d' in filename):
outputdata=filename
print('importing points')
# angle=0+int(angle)
angle=float(angle)
pts_c3d, fs, labels = aml.import_raw_c3d(filename, angle)
pts = np.array(pts_c3d, dtype=np.float64)
#Slider tick marks
max_slider=len(pts)
if max_slider < 350:
step = 5
elif max_slider < 700:
step = 10
else:
step = round(max_slider/700) * 10
marks_slider = list(range(0,len(pts),step))
marks={str(marks_slider[i]):{'label':str(marks_slider[i]),
"style":{'font-size':'1vw','writing-mode': 'vertical-rl',
'text-orientation': 'mixed'}} for i in range(0,len(marks_slider))}
elif os.path.exists(filename) and ('.c3d' not in filename):
outputdata = 'ERROR: File must be in .c3d format'
else:
outputdata = 'ERROR: file not found \n%s' % (filename)
return outputdata, pts, fs, max_slider, marks, labels
# Label Data
@app.callback([Output('labels_c3d', 'children'), Output('confidence_c3d', 'children'),
Output('body_segment', 'children'), Output('label_comment', 'children'),
Output('label_timestamp', 'children')],
[Input('load_button', 'n_clicks')], [State('pts_current', 'children'),
State('frame_rate', 'children'), State('rawlabels','children'),
State('upload_data', 'value')])
def label_data(n_clicks, pts, fs, rawlabels, filename):
labels=''
confidence=''
body_segment=''
comment=''
timestamp=None
if (filename is not None) and (pts != ''):
pts=np.array(pts)
if n_clicks is None:
labels= rawlabels #[0]*(len(pts[0,:,0]))
confidence= [0]*(len(pts[0,:,0]))
body_segment=None
comment="Labels not assigned"
elif n_clicks is not None:
print('finding labels and confidence scores')
timestamp=time.time()
labels_c3d, confidence_c3d,_ = aml.marker_label(pts,modelpath,trainvalpath,
markersetpath,fs,windowSize)
print('done labelling')
labels=np.array(labels_c3d)
confidence=np.array(confidence_c3d)
body_segment=1
comment="Labels assigned"
return labels, confidence, body_segment, comment, timestamp
# Marker label modifier
@app.callback([Output('message_mod','children'),Output('labels_updated','children'), Output('mod_timestamp', 'children')],
[Input('submit-button','n_clicks'), Input('labels_current', 'children')],
[State('marker_ind', 'value'),State('new_name', 'value'), State('split_timestamp', 'children'), State('mod_timestamp', 'children')])
def name_mod(n_clicks, labels, marker_ind, new_name, split_timestamp, mod_timestamp):
ctx = dash.callback_context
message=""
if labels is not None:
print('listing labels')
if n_clicks is None:
labels_updated=np.array(labels)
if ctx.triggered:
if n_clicks is not None:
labels_updated=np.array(labels)
mod_timestamp=time.time()
index = int(marker_ind)-1
labels_updated[index]=str(new_name)
message='Marker #"{}" has been labelled as "{}"'.format(marker_ind, str(new_name))
else:
raise PreventUpdate
return message, labels_updated, mod_timestamp
# Split trajectory
@app.callback([Output('pts_split','children'),Output('labels_split', 'children'),
Output('confidence_split', 'children'), Output('split_timestamp', 'children'),
Output('message_split', 'children')],
[Input('submit-split-button', 'n_clicks')],
[State('pts_current', 'children'), State('confidence_current','children'),
State('labels_current','children'), State('traj_split_frame','value'),State('marker_ind', 'value')])
def trajectory_split(n_clicks,pts,confidence,labels,split_idx,marker_ind):
print('splitting')
timestamp=time.time()
message=''
if n_clicks is not None:
pts=np.array(pts)
mk_idx = int(marker_ind) - 1
pts = np.concatenate((pts,np.expand_dims(pts[:,mk_idx,:],1)),axis=1) #Duplicate selected marker
pts[int(split_idx):,mk_idx,:] = np.nan
pts[:int(split_idx),-1,:] = np.nan
pts_split=pts
confidence=np.array(confidence)
confidence = np.append(confidence, confidence[mk_idx])
confidence_split=confidence
labels_split = np.append(labels,labels[mk_idx])
message='Marker #"{}" has been split at timeframe "{}"'.format(marker_ind, str(split_idx))
elif n_clicks is None:
raise PreventUpdate
return pts_split, labels_split, confidence_split, timestamp, message
# Label variables handler
@app.callback([Output('labels_current','children'), Output('labels_list_current', 'children'),
Output('comment', 'children')],
[Input('labels_split','children'), Input('labels_c3d', 'children'), Input('labels_updated', 'children'),
Input('rawlabels', 'children')],
[State('split_timestamp', 'children'), State('mod_timestamp', 'children'), State('rot_timestamp', 'children'),
State('label_timestamp', 'children'), State('message_mod', 'children'), State('message_split', 'children')])
def label_handler(labels_split, labels_c3d, labels_updated, rawlabels, split_timestamp, mod_timestamp, rot_timestamp, label_timestamp, message_mod, message_split):
print('Selecting current label variable')
    labels_current=labels_c3d # labels from the initial c3d import
labels_list_current=''
comment=''
if split_timestamp is None:
split_timestamp=0
if mod_timestamp is None:
mod_timestamp=0
if rot_timestamp is None:
rot_timestamp=0
if label_timestamp is None:
label_timestamp=0
timestamps=[split_timestamp, mod_timestamp, rot_timestamp, label_timestamp]
timestamps=np.array(timestamps)
if label_timestamp==0 and rawlabels: #Markers still unlabelled
labels_current= rawlabels
if np.nanmax(timestamps)==0:
# print('NOTHING DONE YET')
if len(labels_current)>1:
for j in range(len(labels_current)):
labels_list_current+= str(j+1) + ". " + str(labels_current[j])+'\n'
return labels_current, labels_list_current, comment
if np.nanmax(timestamps)==split_timestamp: #Most recent is SPLIT (last used)
# print('SPLIT LAST USED-labels')
labels_current=labels_split #use labels_split instead of labels_updated
comment=message_split
elif np.nanmax(timestamps)==mod_timestamp:
# print('MOD LAST USED-labels')
labels_current=labels_updated
comment=message_mod
elif np.nanmax(timestamps)==rot_timestamp:
raise PreventUpdate
if len(labels_current)>1:
for j in range(len(labels_current)):
labels_list_current+= str(j+1) + ". " + str(labels_current[j])+'\n'
return labels_current, labels_list_current, comment
#Pts/confidence variable handler
@app.callback([Output('pts_current','children'), Output('confidence_current', 'children')],
[Input('pts_c3d', 'children'), Input('pts_split','children'),
Input('confidence_c3d', 'children'), Input('confidence_split', 'children'),
Input('pts_rot', 'children')],
[State('split_timestamp', 'children'), State('mod_timestamp', 'children'),
State('rot_timestamp', 'children'), State('label_timestamp', 'children')])
def pts_handler(pts_c3d, pts_split, confidence_c3d, confidence_split, pts_rot, split_timestamp, mod_timestamp, rot_timestamp, label_timestamp):
print('Selecting current pts variable')
    pts_current=pts_c3d # pts from the initial c3d import
confidence_current=confidence_c3d
if split_timestamp is None:
split_timestamp=0
if mod_timestamp is None:
mod_timestamp=0
if rot_timestamp is None:
rot_timestamp=0
if label_timestamp is None:
label_timestamp=0
timestamps=[split_timestamp, mod_timestamp, rot_timestamp, label_timestamp]
timestamps=np.array(timestamps)
if label_timestamp==0 and pts_current: #Markers still unlabelled
# print('MARKERS NOT LABELLED')
pts_temp=np.array(pts_current)
confidence_current=[0]*(pts_temp.shape[1])
if np.nanmax(timestamps)==0:
return pts_current, confidence_current
    if np.nanmax(timestamps)==label_timestamp: #Most recent is LABEL MARKERS (last used)
# print('LABELLING LAST USED')
confidence_current=confidence_c3d
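        # dash.dash.no_update skips the pts output, so only the confidence
        # store refreshes here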
return dash.dash.no_update, confidence_current
if np.nanmax(timestamps)==split_timestamp: #Most recent is SPLIT (last used)
# print('SPLIT LAST USED-pts')
pts_current=pts_split
confidence_current=confidence_split
elif np.nanmax(timestamps)==rot_timestamp:
# print('ROT LAST USED-pts')
pts_current=pts_rot
confidence_current=confidence_c3d
return pts_current, dash.dash.no_update
return pts_current, confidence_current
# Error list
@app.callback(Output('errors', 'children'),
[Input('labels_current', 'children'), Input('pts_current','children')],
[State('split_timestamp', 'children'), State('mod_timestamp', 'children')])
def update_error(labels,pts, split_timestamp, mod_timestamp):
errors=''
if split_timestamp is None:
split_timestamp=0
if mod_timestamp is None:
mod_timestamp=0
split_timestamp=float(split_timestamp)
mod_timestamp=float(mod_timestamp)
if split_timestamp>mod_timestamp: #Most recent is SPLIT (last used)
# print('SPLIT LAST USED')
raise PreventUpdate
if pts != '':
print('checking errors')
        errors='Label errors have been found for the following markers:\n'
pts = np.array(pts, dtype=np.float64)
#duplicates index
index_duplicates = []
overlap_frames = []
for m in range(len(markers)):
ii = [i for i,x in enumerate(labels) if x == markers[m]]
visible = (~np.isnan(pts[:,ii,0]))
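            # visible[t, k] is True when the k-th marker carrying this label
            # has data at frame t; a row-sum above 1 means two markers share
            # the label at the same frame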
if (visible.sum(1) > 1).any():
# check which ones are actually overlapping
for j1 in range(len(ii)):
for j2 in range(j1+1,len(ii)):
if labels[ii[j2]] != '':
if ((~np.isnan(pts[:,[ii[j1],ii[j2]],0])).sum(1) > 1).any():
index_duplicates.append(ii[j1])
index_duplicates.append(ii[j2])
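                                # append the frame range twice so overlap_frames
                                # stays index-aligned with index_duplicates,
                                # which gained two entries for this pair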
                                overlap = np.where((~np.isnan(
                                    pts[:,[ii[j1],ii[j2]],0])).sum(1) > 1)[0]
                                frames = '%d - %d' % (overlap.min(), overlap.max())
                                overlap_frames.append(frames)
                                overlap_frames.append(frames)
nbr=0 # number of errors
for i in range(0,len(labels)):
if i in index_duplicates:
errors+= 'DUPLICATED: '+ str(i+1) + '. ' + \
labels[i]+ ', frames '+ \
overlap_frames[index_duplicates.index(i)]+'\n'
nbr=nbr+1
if labels[i] not in markers and labels[i] != '' and \
labels[i] != 0 and labels[i] != 'None':
                errors+= 'MISSPELLED: '+ str(i+1) + '. ' + labels[i] +'\n'
nbr=nbr+1
if labels[i] == '' and (~np.isnan(pts[:,i,0])).sum() > 0:
errors += 'UNLABELLED: ' + str(i+1) + '. frames ' + \
str(np.where((~np.isnan(pts[:,i,0])))[0].min()) + ' - ' + \
str(np.where((~np.isnan(pts[:,i,0])))[0].max()) + '\n'
nbr = nbr+1
if nbr==0:
errors='No errors have been found.'
errors='('+str(nbr)+')'+errors
return errors
#Timeframe indicator
@app.callback(Output('timeframe','children'),
[Input('Time_Slider', 'value')])
def current_timeframe(value):
    return 'Timeframe: "{}"'.format(value)
#Slider widget & Dropdown
@app.callback(Output('the_graph','figure'),
[Input('dropdown', 'value'), Input('Time_Slider', 'value'),
Input('pts_current','children'), Input('labels_current', 'children'),
Input('confidence_current','children')])
def update_graph(dropdown,Time_Slider, pts, labels, confidence):
fig=''
if pts != '':
print('graphing')
pts= np.array(pts, dtype=np.float64)
confidence=np.array(confidence)
labels_num = []
for i in range(len(labels)):
labels_num.append('%d. %s' % (i+1,labels[i]))
labels=np.array(labels)
labels_num=np.array(labels_num)
#Define points
X = pts[:,:,0]
Y = pts[:,:,1]
Z = pts[:,:,2]
#Center & Scale Axis
max_range = np.array([np.nanmax(X)-np.nanmin(X), np.nanmax(Y)-np.nanmin(Y),
np.nanmax(Z)-np.nanmin(Z)]).max() / 2.0
mid_x = (np.nanmax(X)+np.nanmin(X)) * 0.5 #middle value x, not necessarily 0
mid_y = (np.nanmax(Y)+np.nanmin(Y)) * 0.5 #middle value y
mid_z = (np.nanmax(Z)+np.nanmin(Z)) * 0.5 #middle value z
if dropdown=='Confidence':
fig = go.FigureWidget(
data=[
go.Scatter3d(
x=pts[Time_Slider,:,0],
y=pts[Time_Slider,:,1],
z=pts[Time_Slider,:,2],
showlegend=False,
visible=True,
mode='markers',
hovertext=labels_num,
marker=dict(
size=5,
cmax=1,
cmin=0,
color=confidence,
colorbar=dict(
title="Confidence"
),
colorscale=['red', 'yellow', 'green']
),
)])
elif dropdown=='Unlabelled':
I = (labels=='') | (labels=='None')
fig = go.FigureWidget(
data=[
go.Scatter3d(
x=pts[Time_Slider,I,0],
y=pts[Time_Slider,I,1],
z=pts[Time_Slider,I,2],
showlegend=False,
visible=True,
mode='markers',
hovertext=labels_num[I],
marker=dict(
size=5,
cmax=1,
cmin=0,
color=confidence[I],
colorbar=dict(title="Confidence"),
colorscale=['red', 'yellow', 'green']
),
)])
fig.add_trace(go.Scatter3d(
x=pts[Time_Slider,~I,0],
y=pts[Time_Slider,~I,1],
z=pts[Time_Slider,~I,2],
mode='markers',
hovertext=labels_num[~I],
showlegend=False,
marker=dict(
size=2,
color="DarkSlateGrey",),
)
)
elif dropdown=='Segments':
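            # colour by body segment: map each label to its segment ID via the
            # global markers/segID lists; unrecognised labels become NaN and
            # are drawn small and grey below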
segclr = -1 * np.ones(pts.shape[1])
for i in range(labels.shape[0]):
if labels[i] in markers:
segclr[i] = segID[markers.index(labels[i])]
else:
segclr[i] = np.nan
I = np.isnan(segclr)
fig = go.FigureWidget(
data=[
go.Scatter3d(
x=pts[Time_Slider,I,0],
y=pts[Time_Slider,I,1],
z=pts[Time_Slider,I,2],
mode='markers',
visible=True,
hovertext=labels_num[I],
showlegend=False,
marker=dict(
size=2,
color="DarkSlateGrey",),
)])
for i in range(len(uniqueSegs)):
fig.add_trace(go.Scatter3d(
x=pts[Time_Slider,segclr==i,0],
y=pts[Time_Slider,segclr==i,1],
z=pts[Time_Slider,segclr==i,2],
mode='markers',
hovertext=labels_num[segclr==i],
showlegend=True,
name=uniqueSegs[i],
marker=dict(
size=5,
cmax=np.nanmax(segclr),
cmin=0,
color=i,
colorscale='hsv'),
))
# Center & Scale Axis on Graph
fig.update_layout(
autosize=False,
scene = dict(
xaxis = dict(nticks=10, range=[mid_x - max_range,mid_x + max_range],),
yaxis = dict(nticks=10, range=[mid_y - max_range, mid_y + max_range],),
zaxis = dict(nticks=10, range=[mid_z - max_range, mid_z + max_range],),
aspectmode="cube"
),
hoverlabel=dict(
bgcolor="white",
font_size=11,
font_family="Georgia"
),
uirevision='same', # don't reset camera zoom and rotation on update
margin=dict(r=250, l=0, b=10, t=0))
    return fig
#Export to c3d
@app.callback(Output('export_comment', 'children'),
[Input('export_button', 'n_clicks'), Input('pts_current', 'children'),
Input('labels_current', 'children'), Input('confidence_current', 'children'),
Input('frame_rate', 'children'),Input('rotang','children')],
[State('upload_data', 'value')])
def export_c3d(n_clicks, pts, labels, confidence, fs, rotang, filename):
export_comment=''
if filename is not None:
output_name = filename[:-4]+'_labelled.c3d'
if n_clicks is None:
raise PreventUpdate
elif n_clicks is not None:
            print('exporting to ' + output_name)
pts=np.array(pts).astype('float64')
#rotang = float(rotang) * np.pi/180
aml.export_labelled_c3d(pts,labels,rotang,filename,output_name,markers,gapfillsize)
export_comment="exported " + output_name.split(os.path.sep)[-1]
return export_comment
print("Opening Web Browser")
webbrowser.open('http://127.0.0.1:8050/', new=2)
if __name__ == '__main__':
app.run_server(debug=False)
``` |
{
"source": "jouve/coveragepy",
"score": 2
} |
#### File: coveragepy/coverage/report.py
```python
import sys
from coverage.exceptions import CoverageException, NoDataError, NotPython
from coverage.files import prep_patterns, FnmatchMatcher
from coverage.misc import ensure_dir_for_file, file_be_gone
def render_report(output_path, reporter, morfs, msgfn):
"""Run a one-file report generator, managing the output file.
    This function ensures the output file is ready to be written to, writes
    the report to it, then closes the file and cleans up.
"""
file_to_close = None
delete_file = False
if output_path == "-":
outfile = sys.stdout
else:
# Ensure that the output directory is created; done here
# because this report pre-opens the output file.
# HTMLReport does this using the Report plumbing because
# its task is more complex, being multiple files.
ensure_dir_for_file(output_path)
outfile = open(output_path, "w", encoding="utf-8")
file_to_close = outfile
try:
return reporter.report(morfs, outfile=outfile)
except CoverageException:
delete_file = True
raise
finally:
if file_to_close:
file_to_close.close()
if delete_file:
file_be_gone(output_path) # pragma: part covered (doesn't return)
else:
msgfn(f"Wrote {reporter.report_type} to {output_path}")
def get_analysis_to_report(coverage, morfs):
"""Get the files to report on.
For each morf in `morfs`, if it should be reported on (based on the omit
and include configuration options), yield a pair, the `FileReporter` and
`Analysis` for the morf.
"""
file_reporters = coverage._get_file_reporters(morfs)
config = coverage.config
if config.report_include:
matcher = FnmatchMatcher(prep_patterns(config.report_include), "report_include")
file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
if config.report_omit:
matcher = FnmatchMatcher(prep_patterns(config.report_omit), "report_omit")
file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
if not file_reporters:
raise NoDataError("No data to report.")
for fr in sorted(file_reporters):
try:
analysis = coverage._analyze(fr)
except NotPython:
# Only report errors for .py files, and only if we didn't
# explicitly suppress those errors.
# NotPython is only raised by PythonFileReporter, which has a
# should_be_python() method.
if fr.should_be_python():
if config.ignore_errors:
msg = f"Couldn't parse Python file '{fr.filename}'"
coverage._warn(msg, slug="couldnt-parse")
else:
raise
except Exception as exc:
if config.ignore_errors:
msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
coverage._warn(msg, slug="couldnt-parse")
else:
raise
else:
yield (fr, analysis)
``` |
{
"source": "jouve/django-oauth-toolkit-example",
"score": 3
} |
#### File: django-oauth-toolkit-example/example/middleware.py
```python
from django import http
from django.conf import settings
XS_SHARING_ALLOWED_ORIGINS = getattr(settings, "XS_SHARING_ALLOWED_ORIGINS", '*')
XS_SHARING_ALLOWED_METHODS = getattr(settings, "XS_SHARING_ALLOWED_METHODS", ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE'])
XS_SHARING_ALLOWED_HEADERS = getattr(settings, "XS_SHARING_ALLOWED_HEADERS", ['x-requested-with', 'content-type', 'accept', 'origin', 'authorization'])
class XsSharingMiddleware(object):
"""
This middleware allows cross-domain XHR using the html5 postMessage API.
eg.
Access-Control-Allow-Origin: http://api.example.com
Access-Control-Allow-Methods: POST, GET, OPTIONS, PUT, DELETE
Access-Control-Allow-Headers: ["Content-Type"]
"""
def process_request(self, request):
if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META:
response = http.HttpResponse()
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS)
response['Access-Control-Allow-Headers'] = ",".join(XS_SHARING_ALLOWED_HEADERS)
return response
return None
def process_response(self, request, response):
# Avoid unnecessary work
if response.has_header('Access-Control-Allow-Origin'):
return response
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS)
return response
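# Note: this is an old-style middleware (a plain object with process_request/
# process_response), so it is registered via the legacy MIDDLEWARE_CLASSES
# setting; under Django >= 1.10 it would need MiddlewareMixin. A sketch,
# assuming this file lives at example/middleware.py:
#
#     MIDDLEWARE_CLASSES = [
#         'example.middleware.XsSharingMiddleware',
#         # ... remaining middleware ...
#     ]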
``` |
{
"source": "jouve/flake8",
"score": 2
} |
#### File: tests/integration/test_api_legacy.py
```python
from flake8.api import legacy
def test_legacy_api(tmpdir):
"""A basic end-to-end test for the legacy api reporting errors."""
with tmpdir.as_cwd():
t_py = tmpdir.join('t.py')
t_py.write('import os # unused import\n')
style_guide = legacy.get_style_guide()
report = style_guide.check_files([t_py.strpath])
assert report.total_errors == 1
```
#### File: tests/unit/test_decision_engine.py
```python
import argparse
import pytest
from flake8 import defaults
from flake8 import style_guide
def create_options(**kwargs):
"""Create and return an instance of argparse.Namespace."""
kwargs.setdefault('select', [])
kwargs.setdefault('extended_default_select', [])
kwargs.setdefault('ignore', [])
kwargs.setdefault('extend_ignore', [])
kwargs.setdefault('disable_noqa', False)
kwargs.setdefault('enable_extensions', [])
return argparse.Namespace(**kwargs)
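# The engine resolves select/ignore conflicts by code-prefix specificity: the
# longer matching prefix wins (e.g. select=['E41'] beats ignore=['E4'] for
# E410). The parametrized cases below exercise those rules.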
@pytest.mark.parametrize('ignore_list,extend_ignore,error_code', [
(['E111', 'E121'], [], 'E111'),
(['E111', 'E121'], [], 'E121'),
(['E111'], ['E121'], 'E121'),
(['E11', 'E12'], [], 'E121'),
(['E2', 'E12'], [], 'E121'),
(['E2', 'E12'], [], 'E211'),
(['E2', 'E3'], ['E12'], 'E211'),
])
def test_was_ignored_ignores_errors(ignore_list, extend_ignore, error_code):
"""Verify we detect users explicitly ignoring an error."""
decider = style_guide.DecisionEngine(
create_options(ignore=ignore_list, extend_ignore=extend_ignore))
assert decider.was_ignored(error_code) is style_guide.Ignored.Explicitly
@pytest.mark.parametrize('ignore_list,extend_ignore,error_code', [
(['E111', 'E121'], [], 'E112'),
(['E111', 'E121'], [], 'E122'),
(['E11', 'E12'], ['E121'], 'W121'),
(['E2', 'E12'], [], 'E112'),
(['E2', 'E12'], [], 'E111'),
(['E2', 'E12'], ['W11', 'E3'], 'E111'),
])
def test_was_ignored_implicitly_selects_errors(ignore_list, extend_ignore,
error_code):
"""Verify we detect users does not explicitly ignore an error."""
decider = style_guide.DecisionEngine(
create_options(ignore=ignore_list, extend_ignore=extend_ignore))
assert decider.was_ignored(error_code) is style_guide.Selected.Implicitly
@pytest.mark.parametrize('select_list,enable_extensions,error_code', [
(['E111', 'E121'], [], 'E111'),
(['E111', 'E121'], [], 'E121'),
(['E11', 'E12'], [], 'E121'),
(['E2', 'E12'], [], 'E121'),
(['E2', 'E12'], [], 'E211'),
(['E1'], ['E2'], 'E211'),
([], ['E2'], 'E211'),
])
def test_was_selected_selects_errors(select_list, enable_extensions,
error_code):
"""Verify we detect users explicitly selecting an error."""
decider = style_guide.DecisionEngine(
options=create_options(select=select_list,
enable_extensions=enable_extensions),
)
assert decider.was_selected(error_code) is style_guide.Selected.Explicitly
def test_was_selected_implicitly_selects_errors():
"""Verify we detect users implicitly selecting an error."""
error_code = 'E121'
decider = style_guide.DecisionEngine(
create_options(
select=[],
extended_default_select=['E'],
),
)
assert decider.was_selected(error_code) is style_guide.Selected.Implicitly
@pytest.mark.parametrize('select_list,error_code', [
(['E111', 'E121'], 'E112'),
(['E111', 'E121'], 'E122'),
(['E11', 'E12'], 'E132'),
(['E2', 'E12'], 'E321'),
(['E2', 'E12'], 'E410'),
])
def test_was_selected_excludes_errors(select_list, error_code):
"""Verify we detect users implicitly excludes an error."""
decider = style_guide.DecisionEngine(create_options(select=select_list))
assert decider.was_selected(error_code) is style_guide.Ignored.Implicitly
@pytest.mark.parametrize(
'select_list,ignore_list,extend_ignore,error_code,expected', [
(['E111', 'E121'], [], [], 'E111', style_guide.Decision.Selected),
(['E111', 'E121'], [], [], 'E112', style_guide.Decision.Ignored),
(['E111', 'E121'], [], [], 'E121', style_guide.Decision.Selected),
(['E111', 'E121'], [], [], 'E122', style_guide.Decision.Ignored),
(['E11', 'E12'], [], [], 'E132', style_guide.Decision.Ignored),
(['E2', 'E12'], [], [], 'E321', style_guide.Decision.Ignored),
(['E2', 'E12'], [], [], 'E410', style_guide.Decision.Ignored),
(['E11', 'E121'], ['E1'], [], 'E112', style_guide.Decision.Selected),
(['E11', 'E121'], [], ['E1'], 'E112', style_guide.Decision.Selected),
(['E111', 'E121'], ['E2'], ['E3'], 'E122',
style_guide.Decision.Ignored),
(['E11', 'E12'], ['E13'], [], 'E132', style_guide.Decision.Ignored),
(['E1', 'E3'], ['E32'], [], 'E321', style_guide.Decision.Ignored),
([], ['E2', 'E12'], [], 'E410', style_guide.Decision.Ignored),
(['E4'], ['E2', 'E12', 'E41'], [], 'E410',
style_guide.Decision.Ignored),
(['E41'], ['E2', 'E12', 'E4'], [], 'E410',
style_guide.Decision.Selected),
(['E'], ['F'], [], 'E410', style_guide.Decision.Selected),
(['F'], [], [], 'E410', style_guide.Decision.Ignored),
(['E'], defaults.IGNORE, [], 'E126', style_guide.Decision.Selected),
(['W'], defaults.IGNORE, [], 'E126', style_guide.Decision.Ignored),
(['E'], defaults.IGNORE, [], 'W391', style_guide.Decision.Ignored),
(['E', 'W'], ['E13'], [], 'E131', style_guide.Decision.Ignored),
(defaults.SELECT, ['E13'], [], 'E131', style_guide.Decision.Ignored),
(defaults.SELECT, defaults.IGNORE, ['W391'], 'E126',
style_guide.Decision.Ignored),
(defaults.SELECT, defaults.IGNORE, [], 'W391',
style_guide.Decision.Selected),
]
)
def test_decision_for(select_list, ignore_list, extend_ignore, error_code,
expected):
"""Verify we decide when to report an error."""
decider = style_guide.DecisionEngine(
create_options(select=select_list,
ignore=ignore_list,
extend_ignore=extend_ignore))
assert decider.decision_for(error_code) is expected
@pytest.mark.parametrize(
'select,ignore,extend_select,enabled_extensions,error_code,expected', [
(defaults.SELECT, [], ['I1'], [], 'I100',
style_guide.Decision.Selected),
(defaults.SELECT, [], ['I1'], [], 'I201',
style_guide.Decision.Ignored),
(defaults.SELECT, ['I2'], ['I1'], [], 'I101',
style_guide.Decision.Selected),
(defaults.SELECT, ['I2'], ['I1'], [], 'I201',
style_guide.Decision.Ignored),
(defaults.SELECT, ['I1'], ['I10'], [], 'I101',
style_guide.Decision.Selected),
(defaults.SELECT, ['I10'], ['I1'], [], 'I101',
style_guide.Decision.Ignored),
(defaults.SELECT, [], [], ['U4'], 'U401',
style_guide.Decision.Selected),
(defaults.SELECT, ['U401'], [], ['U4'], 'U401',
style_guide.Decision.Ignored),
(defaults.SELECT, ['U401'], [], ['U4'], 'U402',
style_guide.Decision.Selected),
(['E', 'W'], ['E13'], [], [], 'E131', style_guide.Decision.Ignored),
(['E', 'W'], ['E13'], [], [], 'E126', style_guide.Decision.Selected),
(['E2'], ['E21'], [], [], 'E221', style_guide.Decision.Selected),
(['E2'], ['E21'], [], [], 'E212', style_guide.Decision.Ignored),
(['F', 'W'], ['C90'], ['I1'], [], 'C901',
style_guide.Decision.Ignored),
(['E', 'W'], ['C'], [], [], 'E131',
style_guide.Decision.Selected),
(defaults.SELECT, defaults.IGNORE, [], ['I'], 'I101',
style_guide.Decision.Selected),
(defaults.SELECT, defaults.IGNORE, ['G'], ['I'], 'G101',
style_guide.Decision.Selected),
(defaults.SELECT, ['G1'], ['G'], ['I'], 'G101',
style_guide.Decision.Ignored),
(defaults.SELECT, ['E126'], [], ['I'], 'I101',
style_guide.Decision.Selected),
(['E', 'W'], defaults.IGNORE, ['I'], [], 'I101',
style_guide.Decision.Ignored),
# TODO(sigmavirus24) Figure out how to exercise the final catch-all
# return statement
]
)
def test_more_specific_decision_for_logic(select, ignore, extend_select,
enabled_extensions, error_code,
expected):
"""Verify the logic of DecisionEngine.more_specific_decision_for."""
decider = style_guide.DecisionEngine(
create_options(
select=select, ignore=ignore,
extended_default_select=extend_select,
enable_extensions=enabled_extensions,
),
)
assert decider.more_specific_decision_for(error_code) is expected
```
#### File: tests/unit/test_statistics.py
```python
import pytest
from flake8 import statistics as stats
from flake8 import style_guide
DEFAULT_ERROR_CODE = 'E100'
DEFAULT_FILENAME = 'file.py'
DEFAULT_TEXT = 'Default text'
def make_error(**kwargs):
"""Create errors with a bunch of default values."""
return style_guide.Violation(
code=kwargs.pop('code', DEFAULT_ERROR_CODE),
filename=kwargs.pop('filename', DEFAULT_FILENAME),
line_number=kwargs.pop('line_number', 1),
column_number=kwargs.pop('column_number', 1),
text=kwargs.pop('text', DEFAULT_TEXT),
physical_line=None,
)
def test_key_creation():
"""Verify how we create Keys from Errors."""
key = stats.Key.create_from(make_error())
assert key == (DEFAULT_FILENAME, DEFAULT_ERROR_CODE)
assert key.filename == DEFAULT_FILENAME
assert key.code == DEFAULT_ERROR_CODE
@pytest.mark.parametrize('code, filename, args, expected_result', [
# Error prefix matches
('E123', 'file000.py', ('E', None), True),
('E123', 'file000.py', ('E1', None), True),
('E123', 'file000.py', ('E12', None), True),
('E123', 'file000.py', ('E123', None), True),
# Error prefix and filename match
('E123', 'file000.py', ('E', 'file000.py'), True),
('E123', 'file000.py', ('E1', 'file000.py'), True),
('E123', 'file000.py', ('E12', 'file000.py'), True),
('E123', 'file000.py', ('E123', 'file000.py'), True),
# Error prefix does not match
('E123', 'file000.py', ('W', None), False),
# Error prefix matches but filename does not
('E123', 'file000.py', ('E', 'file001.py'), False),
# Error prefix does not match but filename does
('E123', 'file000.py', ('W', 'file000.py'), False),
# Neither error prefix match nor filename
('E123', 'file000.py', ('W', 'file001.py'), False),
])
def test_key_matching(code, filename, args, expected_result):
"""Verify Key#matches behaves as we expect with fthe above input."""
key = stats.Key.create_from(make_error(code=code, filename=filename))
assert key.matches(*args) is expected_result
def test_statistic_creation():
"""Verify how we create Statistic objects from Errors."""
stat = stats.Statistic.create_from(make_error())
assert stat.error_code == DEFAULT_ERROR_CODE
assert stat.message == DEFAULT_TEXT
assert stat.filename == DEFAULT_FILENAME
assert stat.count == 0
def test_statistic_increment():
"""Verify we update the count."""
stat = stats.Statistic.create_from(make_error())
assert stat.count == 0
stat.increment()
assert stat.count == 1
def test_recording_statistics():
"""Verify that we appropriately create a new Statistic and store it."""
aggregator = stats.Statistics()
assert list(aggregator.statistics_for('E')) == []
aggregator.record(make_error())
storage = aggregator._store
for key, value in storage.items():
assert isinstance(key, stats.Key)
assert isinstance(value, stats.Statistic)
assert storage[stats.Key(DEFAULT_FILENAME, DEFAULT_ERROR_CODE)].count == 1
def test_statistics_for_single_record():
"""Show we can retrieve the only statistic recorded."""
aggregator = stats.Statistics()
assert list(aggregator.statistics_for('E')) == []
aggregator.record(make_error())
statistics = list(aggregator.statistics_for('E'))
assert len(statistics) == 1
assert isinstance(statistics[0], stats.Statistic)
def test_statistics_for_filters_by_filename():
"""Show we can retrieve the only statistic recorded."""
aggregator = stats.Statistics()
assert list(aggregator.statistics_for('E')) == []
aggregator.record(make_error())
aggregator.record(make_error(filename='example.py'))
statistics = list(aggregator.statistics_for('E', DEFAULT_FILENAME))
assert len(statistics) == 1
assert isinstance(statistics[0], stats.Statistic)
def test_statistic_for_retrieves_more_than_one_value():
"""Show this works for more than a couple statistic values."""
aggregator = stats.Statistics()
for i in range(50):
aggregator.record(make_error(code=f'E1{i:02d}'))
aggregator.record(make_error(code=f'W2{i:02d}'))
statistics = list(aggregator.statistics_for('E'))
assert len(statistics) == 50
statistics = list(aggregator.statistics_for('W22'))
assert len(statistics) == 10
``` |