id (string, 2-8 chars) | text (string, 16-264k chars) | dataset_id (string, 1 class) |
---|---|---|
382508
|
<reponame>x-web/social-network-user-influence-analysis
#!/usr/bin/python
# coding: utf-8
# NLP processing of Twitter data using the NLTK package
__author__ = "x-web"
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.lancaster import LancasterStemmer
import re
inputf = open('tweets.txt', 'r', encoding='utf-8')
outputf = open('tweets_segment.txt', 'w', encoding='utf-8')
# record every valid tweet's id
tidf = open('tid.txt', 'w')
english_stopwords = stopwords.words('english')
english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', "#", '$', '%']
st = LancasterStemmer()
reword = re.compile(r'^[\w]+$', re.I)
noiseword = ['http', 'rt']
count = 1
def isWord(word):
mword = reword.match(word)
if mword and word not in noiseword:
return True
else:
return False
for tweet in inputf:
    # lowercase
    tweet_lower = [word for word in tweet.lower().split()]
    # tokenize
    tweet_tokenized = [word.lower() for word in word_tokenize(tweet)]
    # remove stopwords
    tweet_filtered_stopwords = [word for word in tweet_tokenized if word not in english_stopwords]
    # remove punctuation
    tweet_filtered = [word for word in tweet_filtered_stopwords if word not in english_punctuations]
    # stem
    tweet_stemmed = [st.stem(word) for word in tweet_filtered]
    # keep only meaningful words
    tweet_filtered_nword = [word for word in tweet_stemmed if isWord(word)]
    result = " ".join(tweet_filtered_nword)
    if result != '':
        outputf.write(result + "\n")
        tidf.write(str(count) + "\n")
        print(count)
    count += 1
inputf.close()
outputf.close()
tidf.close()
print('Done')
|
StarcoderdataPython
|
12836350
|
from . import submodule # noqa: F401
|
StarcoderdataPython
|
9796121
|
a=[]
n=int(input("size ??"))
for i in range(n):
data=int(input("enter the data..."))
a.append(data)
print(a)
a.sort()
print("after sorting..",a)
|
StarcoderdataPython
|
6413273
|
"""
03: Fitting Algorithm
=====================
A step-by-step overview of the algorithm for parameterizing neural power spectra.
"""
###################################################################################################
# Algorithmic Description
# -----------------------
#
# In this tutorial we will step through how the power spectrum model is fit.
#
# Note that this notebook is for demonstrative purposes, and does not represent
# the recommended way to fit power spectrum models.
#
# Broadly, the steps in the algorithm are:
#
# - 1) An initial fit of the aperiodic component is computed from the power spectrum
# - 2) This aperiodic fit is subtracted from the power spectrum, creating a flattened spectrum
# - 3) An iterative process identifies peaks in this flattened spectrum
# - 4) A full peak fit is re-fit from all of the identified peak candidates
# - 5) The peak fit is subtracted from the original power spectrum,
# creating a peak-removed power spectrum
# - 6) A final fit of the aperiodic component is taken of the peak-removed power spectrum
# - 7) The full model is reconstructed from the combination of the aperiodic and peak fits,
# and goodness of fit metrics are calculated.
#
###################################################################################################
# sphinx_gallery_thumbnail_number = 4
# General imports
import matplotlib.pyplot as plt
# Import the FOOOF object
from fooof import FOOOF
# Import some internal functions
# These are used here to demonstrate the algorithm
# You do not need to import these functions for standard usage of the module
from fooof.sim.gen import gen_aperiodic
from fooof.plts.spectra import plot_spectra
from fooof.plts.annotate import plot_annotated_peak_search
# Import a utility to download and load example data
from fooof.utils.download import load_fooof_data
###################################################################################################
# Set whether to plot in log-log space
plt_log = False
###################################################################################################
# Load example data files needed for this example
freqs = load_fooof_data('freqs_2.npy', folder='data')
spectrum = load_fooof_data('spectrum_2.npy', folder='data')
###################################################################################################
# Initialize a FOOOF object, with some settings
# These settings will be more fully described later in the tutorials
fm = FOOOF(peak_width_limits=[1, 8], max_n_peaks=6, min_peak_height=0.15)
###################################################################################################
#
# Note that data can be added to a FOOOF object independent of fitting the model, using the
# :meth:`~fooof.FOOOF.add_data` method. FOOOF objects can also be used to plot data,
# prior to fitting any models.
#
###################################################################################################
# Add data to the object
fm.add_data(freqs, spectrum, [3, 40])
###################################################################################################
# Plot the power spectrum
fm.plot(plt_log)
###################################################################################################
#
# The FOOOF object stores most of the intermediate steps internally.
#
# For this notebook, we will first fit the full model, as normal, but then step through,
# and visualize each step the algorithm took to come to that final fit.
#
###################################################################################################
# Fit the power spectrum model
fm.fit(freqs, spectrum, [3, 40])
###################################################################################################
# Step 1: Initial Aperiodic Fit
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We start by taking an initial aperiodic fit. The goal of this fit is to get an initial
# fit that is good enough to get the fitting process started.
#
###################################################################################################
# Do an initial aperiodic fit - a robust fit, that excludes outliers
# This recreates an initial fit that isn't ultimately stored in the FOOOF object
init_ap_fit = gen_aperiodic(fm.freqs, fm._robust_ap_fit(fm.freqs, fm.power_spectrum))
# Plot the initial aperiodic fit
_, ax = plt.subplots(figsize=(12, 10))
plot_spectra(fm.freqs, fm.power_spectrum, plt_log,
label='Original Power Spectrum', color='black', ax=ax)
plot_spectra(fm.freqs, init_ap_fit, plt_log, label='Initial Aperiodic Fit',
color='blue', alpha=0.5, linestyle='dashed', ax=ax)
###################################################################################################
# Step 2: Flatten the Spectrum
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The initial fit is then used to create a flattened spectrum.
#
# The initial aperiodic fit is subtracted out from the original data, leaving a flattened
# version of the data which no longer contains the aperiodic component.
#
###################################################################################################
# Recompute the flattened spectrum using the initial aperiodic fit
init_flat_spec = fm.power_spectrum - init_ap_fit
# Plot the flattened power spectrum
plot_spectra(fm.freqs, init_flat_spec, plt_log,
label='Flattened Spectrum', color='black')
###################################################################################################
# Step 3: Detect Peaks
# ^^^^^^^^^^^^^^^^^^^^
#
# The flattened spectrum is then used to detect peaks. We can better isolate
# peaks in the data, as the aperiodic activity has been removed.
#
# The fitting algorithm uses an iterative procedure to find peaks in the flattened spectrum.
#
# For each iteration:
#
# - The maximum point of the flattened spectrum is found
#
# - If this point fails to pass the relative or absolute height threshold,
# the procedure halts
# - A Gaussian is fit around this maximum point
# - This 'guess' Gaussian is then subtracted from the flattened spectrum
# - The procedure continues to a new iteration with the new version of the flattened spectrum,
# unless `max_n_peaks` has been reached
#
###################################################################################################
# Plot the iterative approach to finding peaks from the flattened spectrum
plot_annotated_peak_search(fm)
###################################################################################################
# Step 4: Create Full Peak Fit
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Once the iterative procedure has halted and the peaks have been identified in the
# flattened spectrum, the set of identified 'guess' peaks is then re-fit, all together.
# This creates the full peak fit of the data.
#
###################################################################################################
# Plot the peak fit: created by re-fitting all of the candidate peaks together
plot_spectra(fm.freqs, fm._peak_fit, plt_log, color='green', label='Final Periodic Fit')
###################################################################################################
# Step 5: Create a Peak-Removed Spectrum
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now that the peak component of the fit is complete and available, this fit is then
# used to isolate a better aperiodic fit.
#
# To do so, the peak fit is removed from the original power spectrum,
# leaving an 'aperiodic-only' spectrum for re-fitting.
#
###################################################################################################
# Plot the peak removed power spectrum, created by removing peak fit from original spectrum
plot_spectra(fm.freqs, fm._spectrum_peak_rm, plt_log,
label='Peak Removed Spectrum', color='black')
###################################################################################################
# Step 6: Re-fit the Aperiodic Component
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The initial aperiodic component fit was made with a robust fitting approach,
# used to get the fitting process started.
#
# With the peak-removed spectrum, we can now re-fit the aperiodic component, to
# re-estimate a better fit, without the peaks getting in the way.
#
###################################################################################################
# Plot the final aperiodic fit, calculated on the peak removed power spectrum
_, ax = plt.subplots(figsize=(12, 10))
plot_spectra(fm.freqs, fm._spectrum_peak_rm, plt_log,
label='Peak Removed Spectrum', color='black', ax=ax)
plot_spectra(fm.freqs, fm._ap_fit, plt_log, label='Final Aperiodic Fit',
color='blue', alpha=0.5, linestyle='dashed', ax=ax)
###################################################################################################
# Step 7: Combine the Full Model Fit
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now that we have the final aperiodic fit, we can combine the aperiodic and peak components
# to create the full model fit.
#
# With this full model fit, we can also calculate the goodness of fit metrics,
# including the error of the fit and the R-squared of the fit, by comparing the
# full model fit to the original data.
#
###################################################################################################
# Plot full model, created by combining the peak and aperiodic fits
plot_spectra(fm.freqs, fm.fooofed_spectrum_, plt_log,
label='Full Model', color='red')
###################################################################################################
#
# The last stage is to calculate the goodness of fit metrics, meaning the fit error & R^2.
#
# At the end of the fitting process, the model object also organizes parameters, such as
# updating gaussian parameters to be peak parameters.
#
# These results are part of what is stored, and printed, as the model results.
#
###################################################################################################
# Print out the model results
fm.print_results()
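###################################################################################################
#
# As a rough, illustrative cross-check (not necessarily the exact computation performed
# internally), the goodness of fit metrics can be approximated by directly comparing the
# full model fit to the original data.
#
###################################################################################################
# Approximate the fit error and R-squared by hand
import numpy as np
approx_error = np.abs(fm.power_spectrum - fm.fooofed_spectrum_).mean()
approx_r_squared = np.corrcoef(fm.power_spectrum, fm.fooofed_spectrum_)[0, 1] ** 2
print(approx_error, approx_r_squared)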
###################################################################################################
#
# Altogether, the full model fit is now available, and can be plotted.
#
###################################################################################################
# Plot the full model fit of the power spectrum
# The final fit (red), and aperiodic fit (blue), are the same as we plotted above
fm.plot(plt_log)
###################################################################################################
# Addendum: Data & Model Component Attributes
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As you may have noticed through this tutorial, the :class:`~fooof.FOOOF` object keeps
# track of some versions of the original data, the individual model component fits,
# and the final model fit, the ultimate outcome of the fitting procedure.
#
# These attributes in the FOOOF object are kept at the end of the fitting procedure.
# Though they are primarily computed for internal use (hence being considered 'private'
# attributes, with the leading underscore), they are accessible and potentially
# useful for some analyses, and so are briefly described here.
#
# Stored model components:
#
# - Aperiodic Component: ``_ap_fit``
#
# - This is the aperiodic-only fit of the data.
# - It is computed by generating a reconstruction of the measured aperiodic parameters
#
# - Periodic Component: ``_peak_fit``
#
# - This is the periodic-only (or peak) fit of the data.
# - It is computed by generating a reconstruction of the measured periodic (peak) parameters
#
# Stored data attributes:
#
# - Flattened Spectrum: ``_spectrum_flat``
#
# - The original data, with the aperiodic component removed
# - This is computed as ``power_spectrum`` - ``_ap_fit``
#
# - Peak Removed Spectrum: ``_spectrum_peak_rm``
#
# - The original data, with the periodic component (peaks) removed
# - This is computed as ``power_spectrum`` - ``_peak_fit``
#
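###################################################################################################
# As a minimal illustrative check (assuming the attributes are computed exactly as described
# above), the stored data attributes can be reconstructed from the model components
import numpy as np
assert np.allclose(fm._spectrum_flat, fm.power_spectrum - fm._ap_fit)
assert np.allclose(fm._spectrum_peak_rm, fm.power_spectrum - fm._peak_fit)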
###################################################################################################
# Conclusion
# ----------
#
# In this tutorial we have stepped through the parameterization algorithm for fitting
# power spectrum models.
#
# Next, we will continue to explore the FOOOF object by properly introducing and more
# fully describing the settings for the algorithm.
#
|
StarcoderdataPython
|
1801679
|
import sys
import pygame
import random
from settings import Settings
from game_stats import GameStats
from wizard import Wizard
from info import Info
from item import Item
from inventory import Inventory
from inventory_window import InventoryWindow
class WizardsBroth:
"""Overal class to manage game assist and behavior"""
def __init__(self):
pygame.init()
self.settings = Settings(self)
self.screen = pygame.display.set_mode((0, 0),
pygame.FULLSCREEN)
self.settings.screen_width = self.screen.get_rect().width
self.settings.screen_height = self.screen.get_rect().height
pygame.display.set_caption(self.settings.game_name)
self.wizard = Wizard(self)
self.inventory = Inventory(self)
self.inventory_window = InventoryWindow(self)
        # Create an instance to store game statistics
self.stats = GameStats(self)
self.info = Info(self)
self.items = pygame.sprite.Group()
self._create_items()
# Set the background color.
self.bg_color=(150,230,150)
def _create_items(self):
count = 3 #random.randint(1,3)
print(f"count {count}")
for i in range(count):
item = Item(self, random.randint(0,2))
item_width, item_height = item.rect.size
item.x = random.randint(0,self.settings.screen_width - 20)
item.y = random.randint(0,self.settings.screen_height - 20 )
self.items.add(item)
def _reset_and_start_game(self):
self.stats.game_active = True
self.inventory.items = []
self.inventory_window.prep_inventory()
def _pick_up_item(self):
if self.stats.can_pick_up:
print("voidaan nostaa")
else:
print("Täällä ei ole mitään kerättävää")
def _check_events(self):
# Watch for keyboard and mouse events.
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
self._check_keydown_events(event)
elif event.type == pygame.KEYUP:
self._check_keyup_events(event)
def _check_keyup_events(self, event):
if event.key == pygame.K_RIGHT:
self.wizard.moving_right = False
elif event.key == pygame.K_LEFT:
self.wizard.moving_left = False
elif event.key == pygame.K_UP:
self.wizard.moving_up = False
elif event.key == pygame.K_DOWN:
self.wizard.moving_down = False
def _check_keydown_events(self, event):
"""Respond to keypresses."""
if event.key == pygame.K_RIGHT:
self.wizard.moving_right = True
elif event.key == pygame.K_LEFT:
self.wizard.moving_left = True
elif event.key == pygame.K_q:
sys.exit()
elif event.key == pygame.K_UP:
self.wizard.moving_up = True
elif event.key == pygame.K_DOWN:
self.wizard.moving_down = True
elif event.key == pygame.K_SPACE:
self._pick_up_item()
elif event.key == pygame.K_o:
self.stats.more_info = True
elif event.key == pygame.K_ESCAPE:
self.stats.more_info = False
self.stats.show_inventory = False
elif event.key == pygame.K_p:
if not self.stats.game_active:
self._reset_and_start_game()
elif event.key == pygame.K_t:
self.stats.show_inventory = True
def __update_screen(self):
# Redraw the screen during each pass through the loop.
self.screen.fill(self.settings.bg_color)
self.wizard.blitme()
self.items.draw(self.screen)
if self.stats.show_inventory:
self.inventory_window.show_inventory()
if not self.stats.game_active and not self.stats.more_info:
self.info.show_message(self.settings.start_message)
if self.stats.more_info:
self.info.show_message(self.settings.instructions)
# Make the most recently drawn screen visible.
pygame.display.flip()
def _check_can_pick(self):
if pygame.sprite.spritecollideany(self.wizard, self.items):
self.stats.can_pick_up = True
else:
self.stats.can_pick_up = False
def run_game(self):
"""Start the main loop for game."""
while True:
self._check_events()
if self.stats.game_active:
self.wizard.update()
self._check_can_pick()
self.__update_screen()
if __name__ == '__main__':
ai = WizardsBroth()
ai.run_game()
|
StarcoderdataPython
|
8145419
|
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import time
import json
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import os
import urllib.request
from tqdm import tqdm
import datetime
caps = DesiredCapabilities.CHROME
caps['goog:loggingPrefs'] = {'performance': 'ALL'}
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--mute-audio")
driver = webdriver.Chrome(desired_capabilities=caps, options = chrome_options)
page = "https://www.vlive.tv/channel/C1B7AF/board/5464"
driver.get(page)
for i in range(1, 60):
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(1)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
videos = soup.find_all('li', {'class': 'post_item--3Brrv'})
links = []
for video in videos:
link = video.find('a', {'class': 'post_area--3dKbo'}).attrs['href']
links.append(link)
print('# of videos found: ' + str(len(links)))
# links.reverse()
def process_browser_log_entry(entry):
response = json.loads(entry['message'])['message']
return response
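# Added note: each video page's Chrome performance log is scanned for the request to the
# VOD playback API (apis.naver.com/rmcnmv/rmcnmv/vod/play/v2.0), whose JSON response lists
# the available stream resolutions and caption tracks that are downloaded below.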
for vlive in tqdm(links):
attempts = 0
r = requests.get("https://vlive.tv"+vlive)
get_request_soup = BeautifulSoup(r.text, 'html.parser')
script = get_request_soup.find('script', {'type': 'text/javascript'}).text.replace('window.__PRELOADED_STATE__=', "").split(',function', 1)[0]
video_obj = json.loads(script)
video_id = str(video_obj['postDetail']['post']['officialVideo']['videoSeq'])
title = video_obj['postDetail']['post']['title']
date = datetime.datetime.fromtimestamp(video_obj['postDetail']['post']['createdAt']/1000).strftime("%Y%m%d%H%M")
print(date, video_id, title)
video_path = "D:/izone/" + date + '_' + video_id
driver.get("https://vlive.tv/video/"+video_id)
time.sleep(5)
video_html = driver.page_source
while attempts < 5:
#handles unexpected page load failures
try:
browser_log = driver.get_log('performance')
events = [process_browser_log_entry(entry) for entry in browser_log]
new_events = []
for event in events:
try:
if 'apis.naver.com/rmcnmv/rmcnmv/vod/play/v2.0' in event['params']['request']['url']:
new_events.append(event)
except:
pass
if not len(new_events) == 0:
naver_link = new_events[0]['params']['request']['url'][0:177]
naver_r = requests.get(naver_link).json()
video_res = naver_r['videos']['list']
sorted_video_res = sorted(video_res, key = lambda k: k['encodingOption']['height'])
video_link = sorted_video_res[-1]['source']
else:
driver.refresh()
attempts+=1
break
except:
driver.refresh()
attempts+=1
video_attempts = 0
if os.path.exists(video_path):
for roots, dirs, files in os.walk(video_path):
if 'captions' in naver_r:
#if the video has captions
for language in naver_r['captions']['list']:
code_and_type = language['language'] + '-' + language['type']
sub_link = language['source']
# if not any(code_and_type in i for i in dirs):
if not os.path.exists(video_path + '/' + code_and_type + '/'):
os.mkdir(video_path + '/' + code_and_type)
urllib.request.urlretrieve(sub_link, video_path + '/' + code_and_type + '/' + code_and_type + ".vtt")
print('Acquired ' + code_and_type + '.vtt')
if not any('.mp4' in x for x in files):
#no video
while video_attempts < 5:
try:
urllib.request.urlretrieve(video_link, video_path + '/' + video_id + '.mp4')
print('Acquired ' + video_id + '.mp4')
break
except:
video_attempts += 1
pass
if not 'title.txt' in files:
#no title
with open(video_path + '/' + 'title.txt', 'w', encoding = 'utf-8') as f:
f.write(title)
#top level dir
break
else:
matching_id_dir = [x for x in os.listdir("D:/izone/") if video_id in x.split("_")[1]]
if not len(matching_id_dir) == 0:
matching_id_date = matching_id_dir[0].split("_")[0]
if not matching_id_date == date:
print('SAME VIDEO ID EXISTS, DIFFERENT DATE: ', date, video_id)
print(matching_id_dir[0])
break
os.mkdir(video_path)
while video_attempts < 5:
try:
urllib.request.urlretrieve(video_link, video_path + '/' + video_id+'.mp4')
print('Acquired ' + video_id + '.mp4')
break
except:
video_attempts += 1
pass
if 'captions' in naver_r:
#if video has captions
for language in naver_r['captions']['list']:
code_and_type = language['language'] + '-' + language['type']
sub_link = language['source']
os.mkdir(video_path + '/' + code_and_type)
urllib.request.urlretrieve(sub_link, video_path + '/' + code_and_type + '/' + code_and_type + ".vtt")
print('Acquired ' + code_and_type + '.vtt')
with open(video_path + '/' + 'title.txt', 'w', encoding = 'utf-8') as f:
f.write(title)
|
StarcoderdataPython
|
5082175
|
from calculo import Mathik
import pytest
class Test_Bhaskara:
@pytest.fixture
def mat(self):
return Mathik()
    # parametrized test data
testdata = [( 1, -2, 1, 1, 1, ''),
( 1, -5, 6, 2, 3, 2),
(10, 10, 10, 0,'', ''),
(10, 20, 10, 1, -1,'')
]
@pytest.mark.parametrize('a , b, c , nraiz, x1, x2 ', testdata)
    # Bhaskara test
def testa_Bhaskara(self, mat, a, b , c , nraiz, x1, x2 ):
if nraiz == 0:
assert mat.calcula_raiz(a, b, c) == (nraiz)
elif nraiz == 1:
assert mat.calcula_raiz(a, b, c) == (nraiz, x1)
elif nraiz == 2:
assert mat.calcula_raiz(a, b, c) == (nraiz, x1, x2)
##    def testa_uma_raiz(self, b): # b added as a parameter, receives the fixture b
##        # repeated code b
## assert b.calcula_raiz(1,-2,1) == (1, 1)
##
## def testa_duas_raizes(self, b):
## assert b.calcula_raiz(1,-5, 6) == (2, 3, 2)
##
## def testa_zero_raizes(self, b):
## assert b.calcula_raiz(10,10, 10) == (0)
##
## def testa_raiz_negativa(self, b):
## assert b.calcula_raiz(10,20, 10) == (1,-1)
###
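# Hypothetical sketch (not taken from the tested `calculo` module): one possible
# Mathik.calcula_raiz implementation consistent with the parametrized expectations above.
##
## import math
##
## class Mathik:
##     def calcula_raiz(self, a, b, c):
##         delta = b ** 2 - 4 * a * c
##         if delta < 0:
##             return 0                           # no real roots
##         if delta == 0:
##             return (1, -b / (2 * a))           # one (double) root
##         x1 = (-b + math.sqrt(delta)) / (2 * a)
##         x2 = (-b - math.sqrt(delta)) / (2 * a)
##         return (2, x1, x2)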
|
StarcoderdataPython
|
9680364
|
import os
import numpy as np
from rho_factor.gen_rho import generate_rho_values as g
gg = g.generate()
gg.azi = np.linspace(0, 180, 13)
gg.sza = np.linspace(0, 80, 21)
gg.vza = np.linspace(0, 80, 17)
gg.execute()
|
StarcoderdataPython
|
1893832
|
import json, os
PATH_ABS = os.path.abspath('.')
if os.environ.get('CONF_PATH') is None:
CONF_PATH = "../../config"
else:
CONF_PATH = os.environ.get('CONF_PATH')
PATH_CONF = os.path.join(PATH_ABS , CONF_PATH)
with open(os.path.join(PATH_CONF, "abi.json")) as file:
ABIS = json.load(file)
with open(os.path.join(PATH_CONF, "address.json")) as file:
ADRESSES = json.load(file)
|
StarcoderdataPython
|
3262480
|
<filename>lifted_hgp.py
import numpy as np
import ldpc.protograph as pt
from bposd.css import css_code
from bposd.stab import stab_code
def I(n):
return pt.identity(n)
class lifted_hgp(css_code):
def __init__(self,lift_parameter,a,b=None):
'''
Generates the lifted hypergraph product of the protographs a and b
'''
self.a=a
self.a_m,self.a_n=self.a.shape
if b is None:
self.b=pt.copy(self.a)
else:
self.b=b
self.b_m,self.b_n=self.b.shape
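        # Hypergraph product structure, as implemented below:
        #   Hx = [ A (x) I(n_b) | I(m_a) (x) B^T ]
        #   Hz = [ I(n_a) (x) B | A^T (x) I(m_b) ]
        # where (x) denotes the Kronecker product.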
self.hx1_proto=np.kron(self.a,I(self.b_n))
self.hx2_proto=np.kron(I(self.a_m),self.b.T)
self.hx_proto=pt.hstack([self.hx1_proto,self.hx2_proto])
self.hz1_proto=np.kron(I(self.a_n),self.b)
self.hz2_proto=np.kron(self.a.T,I(self.b_m))
self.hz_proto=pt.hstack([self.hz1_proto,self.hz2_proto])
self.lift_parameter=lift_parameter
super().__init__(self.hx_proto.to_binary(lift_parameter),self.hz_proto.to_binary(lift_parameter))
@property
def protograph(self):
px=pt.vstack([pt.zeros(self.hz_proto.shape),self.hx_proto])
pz=pt.vstack([self.hz_proto,pt.zeros(self.hx_proto.shape)])
return pt.hstack([px,pz])
@property
def hx1(self):
return self.hx1_proto.to_binary(self.lift_parameter)
@property
def hx2(self):
return self.hx2_proto.to_binary(self.lift_parameter)
@property
def hz1(self):
return self.hz1_proto.to_binary(self.lift_parameter)
@property
def hz2(self):
return self.hz2_proto.to_binary(self.lift_parameter)
class bias_tailored_lifted_product(stab_code):
def __init__(self,lift_parameter,a,b=None):
lhgp=lifted_hgp(lift_parameter,a,b)
#Hadamard rotation
temp1=pt.hstack([pt.zeros(lhgp.hx1_proto.shape),lhgp.hz2_proto])
temp2=pt.hstack([lhgp.hx1_proto,pt.zeros(lhgp.hz2_proto.shape)])
self.hx_proto=pt.vstack([temp1,temp2])
temp1=pt.hstack([lhgp.hz1_proto,pt.zeros(lhgp.hx2_proto.shape)])
temp2=pt.hstack([pt.zeros(lhgp.hz1_proto.shape),lhgp.hx2_proto])
self.hz_proto=pt.vstack([temp1,temp2])
super().__init__(self.hx_proto.to_binary(lift_parameter),self.hz_proto.to_binary(lift_parameter))
@property
def protograph(self):
px=pt.vstack([pt.zeros(self.hz_proto.shape),self.hx_proto])
pz=pt.vstack([self.hz_proto,pt.zeros(self.hx_proto.shape)])
return pt.hstack([px,pz])
|
StarcoderdataPython
|
1926971
|
from typing import List, Tuple
from sqlalchemy import desc, asc
class TrainingDataManager(object):
def __init__(self, table_type):
self._table_type = table_type
self._session = None
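        # NOTE: _session is presumably assigned a SQLAlchemy session by a subclass
        # before any of the query methods below are called.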
def new_training_data(self) -> List[Tuple[bytes]]:
return self._session.query(self._table_type.text).filter(self._table_type.trained == 0).all()
def all_training_data(self, limit: int = None, order_by: str = None, order='desc') -> List[Tuple[bytes]]:
query = self._session.query(self._table_type.text)
if order_by and order == 'desc':
query = query.order_by(desc(order_by))
elif order_by and order == 'asc':
query = query.order_by(asc(order_by))
if limit:
query = query.limit(limit)
return query.all()
def mark_trained(self):
self._session.execute('UPDATE ' + self._table_type.__tablename__ + ' SET TRAINED = 1')
self._session.commit()
def mark_untrained(self):
self._session.execute('UPDATE ' + self._table_type.__tablename__ + ' SET TRAINED = 0')
self._session.commit()
def commit(self):
self._session.commit()
def store(self, data):
pass
|
StarcoderdataPython
|
6581078
|
<filename>module/monsters.py<gh_stars>1-10
import json
import random
from module import db
import copy
with open('../assets/monsters.json', encoding='utf-8') as f:
monsters = json.load(f)
def init():
global monsters
if all(i.isdecimal() for i in monsters.keys()):
return
for m_key in monsters["monsters"].keys():
for i, v in enumerate(monsters["monsters"][m_key]):
monster = monsters["default"].copy()
monster.update(v)
monsters["monsters"][m_key][i] = monster
monsters = monsters["monsters"]
def get(boss_lv=1, boss_id=None):
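    # Pick the monster tier keyed by the largest table key that evenly divides boss_lv,
    # then either sample a monster weighted by its encounter rate or look one up by boss_id.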
monster_division = monsters[str(max(i for i in map(int, monsters.keys()) if boss_lv % i == 0))]
if boss_id is None:
monster = random.choices(list(enumerate(monster_division)),
weights=[i["encounter rate"] for i in monster_division])[0]
else:
try:
monster = (boss_id, monster_division[boss_id])
except:
print("ERROR boss_id: ",boss_id)
return None
return copy.deepcopy(monster)
init()
|
StarcoderdataPython
|
11200952
|
<filename>tests/wrap/test_simple_soil_elements.py<gh_stars>10-100
import numpy as np
import o3seespy.extensions
import o3seespy as o3
import pytest
def test_2d_site_period():
osi = o3.OpenSeesInstance(ndm=2, ndf=2, state=3)
# Establish nodes
node_depths = np.arange(0, 10, 1)
n_node_rows = len(node_depths)
x_nodes = np.arange(0, 10, 1)
nx = len(x_nodes)
nd = {}
for yy in range(0, len(node_depths)):
for xx in range(nx):
# Establish left and right nodes
nd[f"X{xx}Y{yy}"] = o3.node.Node(osi, x_nodes[xx], -node_depths[yy])
# set x and y dofs equal for left and right nodes
o3.EqualDOF(osi, nd[f"X0Y{yy}"], nd[f"X{nx - 1}Y{yy}"], [o3.cc.X])
# Fix base nodes
for xx in range(nx):
o3.Fix2DOF(osi, nd[f"X{xx}Y{n_node_rows -1}"], o3.cc.FIXED, o3.cc.FIXED)
for yy in range(0, len(node_depths) - 1):
for xx in range(nx):
o3.Fix2DOF(osi, nd[f"X{xx}Y{yy}"], o3.cc.FREE, o3.cc.FIXED)
vs = 150.0
rho = 1.8
g_mod = vs ** 2 * rho
poissons_ratio = 0.3
e_mod = 2 * g_mod * (1 + poissons_ratio)
ele_thick = 1.0
soil_mat = o3.nd_material.ElasticIsotropic(osi, e_mod=e_mod, nu=poissons_ratio, rho=rho)
eles = []
for yy in range(0, len(node_depths) - 1):
for xx in range(nx - 1):
# def element
nodes = [nd[f"X{xx}Y{yy + 1}"], nd[f"X{xx + 1}Y{yy + 1}"], nd[f"X{xx + 1}Y{yy}"], nd[f"X{xx}Y{yy}"]]
eles.append(o3.element.SSPquad(osi, nodes, soil_mat, o3.cc.PLANE_STRAIN, ele_thick, 0.0, 0.0))
# set damping based on first eigen mode
angular_freq_sqrd = o3.get_eigen(osi, solver='fullGenLapack', n=1)
if hasattr(angular_freq_sqrd, '__len__'):
angular_freq = angular_freq_sqrd[0] ** 0.5
else:
angular_freq = angular_freq_sqrd ** 0.5
# o3.extensions.to_py_file(osi, 'many_eles_2d.py')
response_period = 2 * np.pi / angular_freq
expected_period = 4 * max(node_depths) / vs
print('response_period: ', response_period, expected_period, response_period / expected_period)
assert np.isclose(response_period, expected_period, rtol=0.01)
def _run_dyn_1d_site_response(region_based=None):
osi = o3.OpenSeesInstance(ndm=2, ndf=2, state=3)
# Establish nodes
node_depths = np.arange(0, 5, 1)
n_node_rows = len(node_depths)
x_nodes = np.arange(0, 2, 1)
nx = len(x_nodes)
nd = {}
for yy in range(0, len(node_depths)):
for xx in range(nx):
# Establish left and right nodes
nd[f"X{xx}Y{yy}"] = o3.node.Node(osi, x_nodes[xx], -node_depths[yy])
# set x and y dofs equal for left and right nodes
o3.EqualDOF(osi, nd[f"X0Y{yy}"], nd[f"X{nx - 1}Y{yy}"], [o3.cc.X])
# Fix base nodes
for xx in range(nx):
o3.Fix2DOF(osi, nd[f"X{xx}Y{n_node_rows -1}"], o3.cc.FIXED, o3.cc.FIXED)
for yy in range(0, len(node_depths) - 1):
for xx in range(nx):
o3.Fix2DOF(osi, nd[f"X{xx}Y{yy}"], o3.cc.FREE, o3.cc.FIXED)
vs = 150.0
rho = 1.8
g_mod = vs ** 2 * rho
poissons_ratio = 0.3
e_mod = 2 * g_mod * (1 + poissons_ratio)
ele_thick = 1.0
soil_mat = o3.nd_material.ElasticIsotropic(osi, e_mod=e_mod, nu=poissons_ratio, rho=rho)
eles = []
for yy in range(0, len(node_depths) - 1):
for xx in range(nx - 1):
# def element
nodes = [nd[f"X{xx}Y{yy + 1}"], nd[f"X{xx + 1}Y{yy + 1}"], nd[f"X{xx + 1}Y{yy}"], nd[f"X{xx}Y{yy}"]]
eles.append(o3.element.SSPquad(osi, nodes, soil_mat, o3.cc.PLANE_STRAIN, ele_thick, 0.0, 0.0))
freqs = np.array([0.5, 15.0])
xi = 0.1 # high damping to see effects
ang_f = np.pi / freqs
alpha_m = xi * 2.0 * ang_f[0] * ang_f[1] / (ang_f[0] + ang_f[1]) # mass proportional
beta_k = xi * 2.0 / (ang_f[0] + ang_f[1]) # stiffness proportional
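    # Added note: these are the standard two-frequency Rayleigh damping coefficients,
    # giving a damping matrix C = alpha_m * M + beta_k * K with a damping ratio of
    # approximately xi at the two reference frequencies defined above.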
if region_based == 'node':
o3.region.NodeRegion(osi, nodes='all', rayleigh={'alpha_m': alpha_m, 'beta_k_init': beta_k})
if region_based == 'ele':
o3.region.ElementRegion(osi, eles='all', rayleigh={'alpha_m': alpha_m, 'beta_k_init': beta_k})
else:
o3.rayleigh.Rayleigh(osi, alpha_m=alpha_m, beta_k=0.0, beta_k_init=beta_k, beta_k_comm=0.0)
# Define the dynamic analysis
dt = 0.01
vals = np.sin(np.linspace(0, np.pi, 50))
acc_series = o3.time_series.Path(osi, dt=dt, values=-vals) # should be negative
o3.pattern.UniformExcitation(osi, dir=o3.cc.X, accel_series=acc_series)
# Run the dynamic analysis
o3.wipe_analysis(osi)
o3.algorithm.Newton(osi)
o3.system.SparseGeneral(osi)
o3.numberer.RCM(osi)
o3.constraints.Transformation(osi)
o3.integrator.Newmark(osi, 0.5, 0.25)
o3.analysis.Transient(osi)
o3.test_check.EnergyIncr(osi, tol=1.0e-3, max_iter=10)
analysis_time = 1.0
analysis_dt = 0.001
rec_dt = 0.01
tn = o3.recorder.NodeToArrayCache(osi, nd["X0Y0"], [o3.cc.DOF2D_X], 'accel', dt=rec_dt)
if region_based:
o3.extensions.to_py_file(osi, 'wr.py')
else:
o3.extensions.to_py_file(osi, 'wor.py')
curr_time = o3.get_time(osi)
while curr_time < analysis_time:
status = o3.analyze(osi, 1, analysis_dt)
curr_time = o3.get_time(osi)
if status != 0:
print('Not ok')
print(status)
o3.wipe(osi)
x_acc = tn.collect()
return x_acc
def test_region_based_damping():
wnr = _run_dyn_1d_site_response(region_based='node')
wer = _run_dyn_1d_site_response(region_based='ele')
wor = _run_dyn_1d_site_response(region_based=None)
assert np.isclose(wor, wnr).all()
assert np.isclose(wor, wer).all()
if __name__ == '__main__':
test_2d_site_period()
test_region_based_damping()
|
StarcoderdataPython
|
4947837
|
<filename>novice/02-02/latihan/tes_sum2.py
def test_sum():
assert sum([1, 2, 3]) == 6, 'should be 6'
def test_sum_tuple():
    assert sum((1, 2, 2)) == 5, 'should be 5'
if __name__=='__main__':
test_sum()
test_sum_tuple()
    print('everything passed')
|
StarcoderdataPython
|
9738190
|
<reponame>liubaishuo-github/peening-post-processor
import datetime
dt = datetime.datetime.now()
print(type(dt))
print(dt)
time_str = dt.strftime('%b-%d-%Y %H:%M:%S')
print(time_str)
|
StarcoderdataPython
|
11296133
|
<reponame>swederik/structurefunction
from nipype.interfaces.base import (BaseInterface, traits,
File, TraitedSpec, InputMultiPath,
isdefined)
from nipype.utils.filemanip import split_filename
import os.path as op
import numpy as np
import nibabel as nb
import networkx as nx
import scipy.io as sio
from nipype.workflows.misc.utils import get_data_dims
from nipype.interfaces.cmtk.nx import (remove_all_edges, add_node_data, add_edge_data)
from nipype import logging
iflogger = logging.getLogger('interface')
class CreateConnectivityThresholdInputSpec(TraitedSpec):
in_files = InputMultiPath(File(exists=True), mandatory=True, xor=[
'in_file4d'], desc='Original functional magnetic resonance image (fMRI) as a set of 3-dimensional images')
in_file4d = File(exists=True, mandatory=True, xor=[
'in_files'], desc='Original functional magnetic resonance image (fMRI) as a 4-dimension image')
time_course_file = File(exists=True, mandatory=True,
desc='Independent component timecourse array (as an image)')
segmentation_file = File(exists=True, mandatory=True,
desc='Image with segmented regions (e.g. aparc+aseg.nii or the output from cmtk.Parcellate())')
subject_id = traits.Str(desc='Subject ID')
out_t_value_threshold_file = File(
'tvalues.mat', usedefault=True, desc='T values per node saved as a Matlab .mat')
class CreateConnectivityThresholdOutputSpec(TraitedSpec):
t_value_threshold_file = File(
desc='Output matlab file containing the thresholding parameters for each region/node')
class CreateConnectivityThreshold(BaseInterface):
input_spec = CreateConnectivityThresholdInputSpec
output_spec = CreateConnectivityThresholdOutputSpec
def _run_interface(self, runtime):
iflogger.info(
'Segmentation image: {img}'.format(img=self.inputs.segmentation_file))
rois = get_roi_list(self.inputs.segmentation_file)
number_of_nodes = len(rois)
iflogger.info('Found {roi} unique region values'.format(roi=len(rois)))
if len(self.inputs.in_files) > 1:
iflogger.info('Multiple input images detected')
iflogger.info(len(self.inputs.in_files))
in_files = self.inputs.in_files
try:
# VERY tempermental solution to sorting the functional images.
# RELIES ON xyz-01_out.nii from recent resampling.
in_files = sorted(in_files, key=lambda x:
int(x.split("_")[-2].split("-")[-2]))
except IndexError:
# Tempermental solution to sorting the functional images.
# RELIES ON xyz_out.nii from recent resampling.
in_files = sorted(in_files, key=lambda x:
int(x.split("_")[-2]))
elif isdefined(self.inputs.in_file4d):
iflogger.info('Single four-dimensional image selected')
in_file4d = nb.load(self.inputs.in_file4d)
in_files = nb.four_to_three(in_file4d)
else:
iflogger.info('Single functional image provided')
in_files = self.inputs.in_files
rois = get_roi_list(self.inputs.segmentation_file)
fMRI_timecourse, _, _, _, _ = get_timecourse_by_region(
in_files, self.inputs.segmentation_file, rois)
timecourse_at_each_node = fMRI_timecourse.T
time_course_image = nb.load(self.inputs.time_course_file)
iflogger.info(np.shape(timecourse_at_each_node))
number_of_components = 30
number_of_images = 198
time_course_data = time_course_image.get_data()
onerow = np.ones(number_of_images)
time_course_per_IC = np.vstack((onerow.T, time_course_data.T)).T
iflogger.info(np.shape(time_course_per_IC))
y = timecourse_at_each_node
X = time_course_per_IC
n = number_of_images
XTX_inv = np.linalg.inv(np.dot(X.T, X))
N = number_of_components + 1
contrast = np.concatenate(
(np.zeros((1, number_of_components)), np.eye(number_of_components))).T
a = np.empty((number_of_nodes, N))
residues = np.empty((number_of_nodes, 1))
rank = np.empty((number_of_nodes, 1))
singularvalues = np.empty((number_of_nodes, N))
resids = np.empty((number_of_nodes, number_of_images))
error_variance = np.empty((number_of_nodes, 1))
t_values = np.empty((number_of_nodes, number_of_components))
beta_value = np.empty((number_of_components, 1))
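        # Added note: for each node, an ordinary least-squares fit of y on X is computed,
        # and a t-statistic per independent component is formed as
        # t = (c . a) / sqrt(error_variance * c (X^T X)^{-1} c^T), where c is a row of `contrast`.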
for node_idx, node in enumerate(rois):
a[node_idx], residues[node_idx], rank[node_idx], singularvalues[
node_idx] = np.linalg.lstsq(X, y[:, node_idx])
resids[node_idx, :] = y[:, node_idx] - \
np.dot(X, a[node_idx]) # e = y - Xa;
error_variance[node_idx] = np.var(resids[node_idx])
for IC in range(0, number_of_components):
t_values[node_idx, IC] = np.dot(contrast[IC], a[node_idx]) / \
np.sqrt(
error_variance[node_idx] * np.dot(np.dot(contrast[IC], XTX_inv), contrast[IC, :].T))
beta_value[IC] = np.dot(contrast[IC], a[node_idx])
t_value_dict = {}
t_value_dict['t_value_per_node'] = t_values
t_value_dict['timecourse_at_each_node'] = y
t_value_dict['timecourse_per_IC'] = X
t_value_dict['number_of_images'] = n
t_value_dict['a'] = a
t_value_dict['contrast'] = contrast
t_value_dict['residuals'] = resids
t_value_dict['error_variance'] = error_variance
out_file = op.abspath(self.inputs.out_t_value_threshold_file)
iflogger.info(
'Saving T-values per node, per IC, as {file}'.format(file=out_file))
sio.savemat(out_file, t_value_dict)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
out_file = op.abspath(self.inputs.out_t_value_threshold_file)
outputs['t_value_threshold_file'] = out_file
return outputs
class ConnectivityGraphInputSpec(TraitedSpec):
in_file = File(exists=True, mandatory=True, desc='fMRI ICA Map')
resolution_network_file = File(
exists=True, mandatory=True, desc='Network resolution file')
t_value_threshold_file = File(
exists=True, mandatory=True, desc='T-value threshold per node per IC. Saved as a Matlab .mat file.')
component_index = traits.Int(
mandatory=True, desc='Index of the independent component to use from the t-value threshold file.')
segmentation_file = File(
exists=True, desc='Image with segmented regions (e.g. aparc+aseg.nii or the output from cmtk.Parcellate())')
subject_id = traits.Str(desc='Subject ID')
give_nodes_values = traits.Bool(
False, desc='Controls whether or not nodes are given scalar values from the functional image')
significance_threshold = traits.Float(
0.001, usedefault=True, desc='Significance threshold used to keep an edge.')
number_of_images = traits.Int(
198, usedefault=True, desc='Number of functional images used to generate the threshold file')
out_stats_file = File(
desc='Some simple image statistics for regions saved as a Matlab .mat')
out_network_file = File(desc='The output network as a NetworkX gpickle.')
class ConnectivityGraphOutputSpec(TraitedSpec):
stats_file = File(
desc='Some simple image statistics for the original and normalized images saved as a Matlab .mat')
network_file = File(
desc='Output gpickled network file for the connectivity graph.')
correlation_network = File(
desc='Output gpickled network file for the correlation network.')
anticorrelation_network = File(
desc='Output gpickled network file for the anticorrelation network.')
class ConnectivityGraph(BaseInterface):
"""
    Creates a weighted connectivity graph from a 4D functional image (or a list of functional images) and a segmented image.
Output is saved in a MATLAB file, and if a network resolution file is provided (e.g. resolution1015.graphml), the regions are output as nodes in a NetworkX graph.
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> congraph = cmtk.ConnectivityGraph()
>>> congraph.inputs.in_file = 'pet_resliced_1.nii'
>>> congraph.inputs.segmentation_file = 'ROI_scale500.nii.gz'
>>> congraph.run() # doctest: +SKIP
"""
input_spec = ConnectivityGraphInputSpec
output_spec = ConnectivityGraphOutputSpec
def _run_interface(self, runtime):
key = 'congraph'
edge_key = 'weight'
iflogger.info(
'T-value Threshold file: {t}'.format(t=self.inputs.t_value_threshold_file))
iflogger.info(
'Independent component to use: {i}'.format(i=self.inputs.component_index))
path, name, ext = split_filename(self.inputs.t_value_threshold_file)
if ext == '.mat':
t_value_dict = sio.loadmat(self.inputs.t_value_threshold_file)
t_values = t_value_dict['t_value_per_node']
t_value_per_node = t_values[:, self.inputs.component_index - 1]
number_of_ICs = np.shape(t_values)[1]
else:
iflogger.info(
"Please save the t-values as a Matlab file with key 't_value_per_node'")
functional = nb.load(self.inputs.in_file)
functionaldata = functional.get_data()
segmentation = nb.load(self.inputs.segmentation_file)
segmentationdata = segmentation.get_data()
rois = get_roi_list(self.inputs.segmentation_file)
number_of_nodes = len(rois)
iflogger.info(
'Found {roi} unique region values'.format(roi=number_of_nodes))
iflogger.info('Significance threshold: {p}'.format(
p=self.inputs.significance_threshold))
#number_of_images = self.inputs.number_of_images
# degrees_of_freedom = number_of_images - \
# number_of_ICs - 1
#sig = self.inputs.significance_threshold
#threshold = tinv((1-sig), degrees_of_freedom)
#threshold = 2*threshold
#iflogger.info('Weight threshold: {w}'.format(w=threshold))
iflogger.info(
'Functional image: {img}'.format(img=self.inputs.in_file))
iflogger.info(
'Segmentation image: {img}'.format(img=self.inputs.segmentation_file))
if not get_data_dims(self.inputs.in_file) == get_data_dims(self.inputs.segmentation_file):
iflogger.error(
'Image dimensions are not the same, please reslice the images to the same dimensions')
dx, dy, dz = get_data_dims(self.inputs.in_file)
iflogger.error('Functional image dimensions: {dimx}, {dimy}, {dimz}'.format(
dimx=dx, dimy=dy, dimz=dz))
dx, dy, dz = get_data_dims(self.inputs.segmentation_file)
iflogger.error('Segmentation image dimensions: {dimx}, {dimy}, {dimz}'.format(
dimx=dx, dimy=dy, dimz=dz))
stats = {}
if self.inputs.give_nodes_values:
func_mean = []
for idx, roi in enumerate(rois):
values = []
x, y, z = np.where(segmentationdata == roi)
for index in range(0, len(x)):
value = functionaldata[x[index]][y[index]][z[index]]
values.append(value)
func_mean.append(np.mean(values))
iflogger.info(
'Region ID: {id}, Mean Value: {avg}'.format(id=roi, avg=np.mean(values)))
stats[key] = func_mean
connectivity_matrix = np.zeros((number_of_nodes, number_of_nodes))
correlation_matrix = np.zeros((number_of_nodes, number_of_nodes))
anticorrelation_matrix = np.zeros((number_of_nodes, number_of_nodes))
iflogger.info('Drawing edges...')
for idx_i, roi_i in enumerate(rois):
t_i = t_value_per_node[idx_i]
#iflogger.info('ROI:{i}, T-value: {t}'.format(i=roi_i, t=t_i))
for idx_j, roi_j in enumerate(rois):
t_j = t_value_per_node[idx_j]
#iflogger.info('...ROI:{j}, T-value: {t}'.format(j=roi_j, t=t_j))
if idx_j > idx_i:
if (t_i > 0 and t_j > 0) or (t_i < 0 and t_j < 0):
weight = abs(t_i) + abs(t_j) - abs(t_i - t_j)
#iflogger.info('Weight = {w} T-values for ROIs {i}-{j}'.format(w=weight, i=t_i, j=t_j))
# if weight > threshold:
connectivity_matrix[idx_i, idx_j] = weight
correlation_matrix[idx_i, idx_j] = weight
#iflogger.info('Drawing a correlation edge for ROIs {i}-{j} at {id_i},{id_j}'.format(i=roi_i, j=roi_j, id_i=idx_i, id_j=idx_j))
elif (t_i < 0 and t_j > 0) or (t_i > 0 and t_j < 0):
weight = abs(t_i) + abs(t_j) - abs(t_i + t_j)
#iflogger.info('Weight = {w} T-values for ROIs {i}-{j}'.format(w=weight, i=t_i, j=t_j))
# if weight > threshold:
connectivity_matrix[idx_i, idx_j] = -weight
anticorrelation_matrix[idx_i, idx_j] = weight
#iflogger.info('Drawing an anticorrelation edge for ROIs {i}-{j} at {id_i},{id_j}'.format(i=roi_i, j=roi_j, id_i=idx_i, id_j=idx_j))
#iflogger.info('Weight = {w} T-values for ROIs {i}-{j}'.format(w=weight, i=t_i, j=t_j))
edges = len(np.nonzero(connectivity_matrix)[0])
cor_edges = len(np.nonzero(correlation_matrix)[0])
anticor_edges = len(np.nonzero(anticorrelation_matrix)[0])
iflogger.info('Total edges: {e}'.format(e=edges))
iflogger.info('Total correlation edges: {c}'.format(c=cor_edges))
iflogger.info(
'Total anticorrelation edges: {a}'.format(a=anticor_edges))
connectivity_matrix = connectivity_matrix + connectivity_matrix.T
correlation_matrix = correlation_matrix + correlation_matrix.T
anticorrelation_matrix = anticorrelation_matrix + \
anticorrelation_matrix.T
stats[edge_key] = connectivity_matrix
stats['correlation'] = correlation_matrix
stats['anticorrelation'] = anticorrelation_matrix
try:
gp = nx.read_gpickle(self.inputs.resolution_network_file)
except IndexError:
gp = nx.read_graphml(self.inputs.resolution_network_file)
nodedict = gp.node[gp.nodes()[0]]
if not nodedict.has_key('dn_position'):
iflogger.info("Creating node positions from segmentation")
G = nx.Graph()
for u, d in gp.nodes_iter(data=True):
G.add_node(int(u), d)
xyz = tuple(
np.mean(np.where(np.flipud(segmentationdata) == int(d["dn_correspondence_id"])), axis=1))
G.node[int(u)]['dn_position'] = xyz
ntwkname = op.abspath('nodepositions.pck')
nx.write_gpickle(G, ntwkname)
else:
ntwkname = self.inputs.resolution_network_file
try:
ntwkname = nx.read_gpickle(ntwkname)
except IndexError:
ntwkname = nx.read_graphml(ntwkname)
newntwk = ntwkname.copy()
newntwk = remove_all_edges(newntwk)
if self.inputs.give_nodes_values:
newntwk = add_node_data(stats[key], newntwk)
corntwk = add_node_data(stats[key], newntwk)
anticorntwk = add_node_data(stats[key], newntwk)
newntwk = add_edge_data(stats[edge_key], newntwk)
corntwk = add_edge_data(stats['correlation'], corntwk)
anticorntwk = add_edge_data(stats['anticorrelation'], anticorntwk)
else:
newntwk = add_edge_data(stats[edge_key], ntwkname)
corntwk = add_edge_data(stats['correlation'], ntwkname)
anticorntwk = add_edge_data(stats['anticorrelation'], ntwkname)
if isdefined(self.inputs.out_network_file):
path, name, ext = split_filename(self.inputs.out_network_file)
if not ext == '.pck':
ext = '.pck'
out_network_file = op.abspath(name + ext)
else:
if isdefined(self.inputs.subject_id):
out_network_file = op.abspath(
self.inputs.subject_id + '_IC_' + str(self.inputs.component_index) + '.pck')
else:
out_network_file = op.abspath(
'IC_' + str(self.inputs.component_index) + '.pck')
path, name, ext = split_filename(out_network_file)
iflogger.info(
'Saving output network as {ntwk}'.format(ntwk=out_network_file))
nx.write_gpickle(newntwk, out_network_file)
out_correlation_network = op.abspath(name + '_correlation' + ext)
iflogger.info(
'Saving correlation network as {ntwk}'.format(ntwk=out_correlation_network))
nx.write_gpickle(corntwk, out_correlation_network)
out_anticorrelation_network = op.abspath(
name + '_anticorrelation' + ext)
iflogger.info('Saving anticorrelation network as {ntwk}'.format(
ntwk=out_anticorrelation_network))
nx.write_gpickle(anticorntwk, out_anticorrelation_network)
if isdefined(self.inputs.subject_id):
stats['subject_id'] = self.inputs.subject_id
if isdefined(self.inputs.out_stats_file):
path, name, ext = split_filename(self.inputs.out_stats_file)
if not ext == '.mat':
ext = '.mat'
out_stats_file = op.abspath(name + ext)
else:
if isdefined(self.inputs.subject_id):
out_stats_file = op.abspath(
self.inputs.subject_id + '_IC_' + str(self.inputs.component_index) + '.mat')
else:
out_stats_file = op.abspath(
'IC_' + str(self.inputs.component_index) + '.mat')
iflogger.info(
'Saving image statistics as {stats}'.format(stats=out_stats_file))
sio.savemat(out_stats_file, stats)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_stats_file):
path, name, ext = split_filename(self.inputs.out_stats_file)
            if not ext == '.mat':
                ext = '.mat'
out_stats_file = op.abspath(name + ext)
else:
if isdefined(self.inputs.subject_id):
out_stats_file = op.abspath(
self.inputs.subject_id + '_IC_' + str(self.inputs.component_index) + '.mat')
else:
out_stats_file = op.abspath(
'IC_' + str(self.inputs.component_index) + '.mat')
outputs["stats_file"] = out_stats_file
if isdefined(self.inputs.out_network_file):
path, name, ext = split_filename(self.inputs.out_network_file)
if not ext == '.pck':
ext = '.pck'
out_network_file = op.abspath(name + ext)
else:
if isdefined(self.inputs.subject_id):
out_network_file = op.abspath(
self.inputs.subject_id + '_IC_' + str(self.inputs.component_index) + '.pck')
else:
out_network_file = op.abspath(
'IC_' + str(self.inputs.component_index) + '.pck')
outputs["network_file"] = out_network_file
path, name, ext = split_filename(out_network_file)
out_correlation_network = op.abspath(name + '_correlation' + ext)
outputs["correlation_network"] = out_correlation_network
out_anticorrelation_network = op.abspath(
name + '_anticorrelation' + ext)
outputs["anticorrelation_network"] = out_anticorrelation_network
return outputs
|
StarcoderdataPython
|
3529768
|
<filename>Python3-Learn/spongebob.py
from turtle import *
def go_to(x, y):
up()
goto(x, y)
down()
def head():
go_to(-200, 180)
fillcolor('yellow')
begin_fill()
seth(-30)
for i in range(6):
circle(36, 60)
circle(-36, 60)
seth(-125)
for i in range(5):
circle(40,60)
circle(-40,60)
seth(-210)
for i in range(4):
circle(45,60)
circle(-45,60)
seth(65)
for i in range(5):
circle(40,60)
circle(-40,60)
end_fill()
def eye():
# Eye white
go_to(14, -5)
fillcolor('#f0f0f0')
begin_fill()
circle(65, 360)
end_fill()
begin_fill()
go_to(13,12)
seth(98)
circle(-65,360)
end_fill()
#eyeball
go_to(-10,20)
fillcolor('blue')
begin_fill()
circle(20,360)
end_fill()
go_to(-22,20)
fillcolor('black')
begin_fill()
circle(7,360)
end_fill()
go_to(40,15)
fillcolor('blue')
begin_fill()
circle(-20, 360)
end_fill()
go_to(53,15)
fillcolor('black')
begin_fill()
circle(-7,360)
end_fill()
#eyelash
go_to(-95,65)
left(20)
forward(40)
go_to(-50,87)
right(25)
forward(32)
go_to(0,70)
right(25)
forward(40)
go_to(40, 75)
left(35)
forward(40)
go_to(90, 87)
right(18)
forward(30)
go_to(120, 70)
right(25)
forward(40)
def nose():
fillcolor('yellow')
go_to(0, -7)
begin_fill()
right(50)
circle(-60, 30)
color('yellow')
goto(15,-40)
end_fill()
color('black')
go_to(0, -7)
seth(-75)
forward(30)
go_to(30,-7)
seth(-105)
forward(30)
def mouth():
go_to(-120, - 60)
seth(-45)
circle(200, 30)
seth(0)
forward(100)
seth(15)
circle(200, 30)
def tooth():
go_to(-30,-114)
seth(-95)
fillcolor('white')
begin_fill()
forward(30)
seth(0)
forward(40)
seth(95)
forward(30)
go_to(-30,-114)
end_fill()
go_to(30, -114)
seth(-95)
fillcolor('white')
begin_fill()
forward(30)
seth(0)
forward(40)
seth(95)
forward(30)
go_to(60, -114)
end_fill()
def hole():
go_to(-160,160)
# fillcolor('#ffd700')
# begin_fill()
circle(30, 360)
# a=1
# for i in range(120):
# if 0<=i<30 or 60<=i<90:
# a=a+0.2
# lt(3)
# forward(a)
# else:
# a=a-0.2
# lt(3)
# forward(a)
# end_fill()
def face():
eye()
nose()
mouth()
tooth()
# hole()
def body():
go_to(-170,-180)
seth(-120)
circle(150, 30)
seth(0)
forward(40)
seth(100)
forward(35)
seth(-80)
forward(100)
fillcolor('brown')
begin_fill()
seth(0)
forward(300)
seth(80)
forward(110)
seth(-100)
forward(65)
seth(180)
forward(315)
go_to(-118,-400)
end_fill()
go_to(-170,-255)
fillcolor('yellow')
begin_fill()
seth(-75)
forward(80)
seth(0)
forward(17)
seth(105)
forward(85)
end_fill()
go_to(200, -170)
seth(-60)
circle(-150,30)
seth(-180)
forward(45)
begin_fill()
seth(0)
forward(20)
seth(-100)
forward(85)
seth(180)
forward(20)
end_fill()
def tie():
go_to(-50,-225)
seth(-40)
forward(40)
seth(30)
forward(52)
go_to(30,-225)
seth(-30)
forward(40)
seth(40)
forward(45)
fillcolor('red')
go_to(0, -240)
begin_fill()
seth(-60)
forward(10)
seth(0)
forward(30)
seth(60)
forward(15)
go_to(30,-225)
end_fill()
go_to(4,-250)
begin_fill()
seth(-100)
forward(80)
seth(0)
forward(55)
seth(100)
forward(80)
end_fill()
def spongeBob():
head()
face()
body()
tie()
if __name__=='__main__':
screensize(800, 600)
pensize(3)
speed(10)
go_to(0, 0)
spongeBob()
go_to(-100,240)
write('<NAME>',font=('BRUSHSCI.TTF', '30', 'bold'))
mainloop()
|
StarcoderdataPython
|
5001973
|
from typing import Dict, List
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
import altair_transform
@pytest.fixture
def data() -> pd.DataFrame:
return pd.DataFrame(
{
"x": [[1, 2, 3], [4, 5, 6, 7], [8, 9]],
"y": [[1, 2], [3, 4], [5, 6]],
"cat": list("ABC"),
}
)
def test_flatten_transform(data: pd.DataFrame) -> None:
out = altair_transform.apply(data, {"flatten": ["x"]})
assert out.shape == (9, 3)
assert out.columns.tolist() == ["x", "y", "cat"]
assert_equal(out.x.values, range(1, 10))
assert_equal(out.cat.values, list("AAABBBBCC"))
out = altair_transform.apply(data, {"flatten": ["x", "y"]})
assert out.shape == (9, 3)
assert out.columns.tolist() == ["x", "y", "cat"]
assert_equal(out.x.values, range(1, 10))
assert_equal(out.y.values, [1, 2, np.nan, 3, 4, np.nan, np.nan, 5, 6])
assert_equal(out.cat.values, list("AAABBBBCC"))
def test_flatten_transform_with_as(data: pd.DataFrame):
out = altair_transform.apply(data, {"flatten": ["y"], "as": ["yflat"]})
assert out.shape == (6, 4)
assert out.columns.tolist() == ["yflat", "x", "y", "cat"]
assert_equal(out.yflat.values, range(1, 7))
assert_equal(out.cat.values, list("AABBCC"))
out = altair_transform.apply(
data, {"flatten": ["x", "y"], "as": ["xflat", "yflat"]}
)
assert out.shape == (9, 5)
assert out.columns.tolist() == ["xflat", "yflat", "x", "y", "cat"]
assert_equal(out.xflat.values, range(1, 10))
assert_equal(out.yflat.values, [1, 2, np.nan, 3, 4, np.nan, np.nan, 5, 6])
assert_equal(out.cat.values, list("AAABBBBCC"))
@pytest.mark.parametrize(
"transform",
[
{"flatten": ["x"]},
{"flatten": ["x"], "as": ["xflat"]},
{"flatten": ["x", "y"]},
{"flatten": ["x", "y"], "as": ["xflat"]},
{"flatten": ["x", "y"], "as": ["xflat", "yflat"]},
],
)
def test_flatten_against_js(
driver, data: pd.DataFrame, transform: Dict[str, List[str]],
) -> None:
got = altair_transform.apply(data, transform)
want = driver.apply(data, transform)
assert_frame_equal(
got[sorted(got.columns)],
want[sorted(want.columns)],
check_dtype=False,
check_index_type=False,
check_less_precise=True,
)
|
StarcoderdataPython
|
4963467
|
<filename>lollylib/test.py
import unittest
import os
import sys
import ntpath
def run(test_name='all'):
loader = unittest.TestLoader()
head, tail = ntpath.split(os.path.realpath(__file__))
sys.path.append(head)
start_dir = head + '/tests'
if test_name == 'all':
suite = loader.discover(start_dir)
else:
suite = loader.discover(start_dir, test_name + '*')
runner = unittest.TextTestRunner()
runner.run(suite)
|
StarcoderdataPython
|
1728089
|
<filename>tmp/utils/thread.py
import threading
class MyThread(threading.Thread):
    def __init__(self, func, *args, **kwargs):  # Change how the thread is used: pass the target function and its arguments directly
super(MyThread, self).__init__()
self.func = func
self.args = args
self.kwargs = kwargs
self.result = None
def run(self):
        self.result = self.func(*self.args, **self.kwargs)  # Store the return value on the thread as the result attribute
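if __name__ == '__main__':
    # Illustrative usage (added example): run a function on a worker thread and read back its result
    t = MyThread(pow, 2, 10)
    t.start()
    t.join()
    print(t.result)  # 1024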
|
StarcoderdataPython
|
8018850
|
<gh_stars>1-10
def flatten(*args):
return helper(args, [])
def helper(args, res):
for i in args:
if isinstance(i, list):
helper(i, res)
else:
res.append(i)
return res
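# Illustrative usage (added example): nested lists are flattened recursively.
if __name__ == '__main__':
    assert flatten(1, [2, [3, 4]], 5) == [1, 2, 3, 4, 5]
    print('ok')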
|
StarcoderdataPython
|
1789544
|
<reponame>ATMackay/bsv-x509<gh_stars>0
# This programme contains functions and classes for secp256k1 elliptic curve cryptography
import numpy as np
import hashlib
import random
from getpass import getpass
# Hard-coded variables
# secp256k1 parameters
secp_G = [int("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16),\
int("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)]
secp_n = int("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
secp_p = int("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
secp_a = int(0)
secp_b = int(7)
secp_inf_point = [0, 0]
#Symmetric Key
sym_key_n = int(2**512)
#Base58 and Base64 encoding
alphabet_58 = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
base58_count = len(alphabet_58)
alphabet_64 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
base64_count = len(alphabet_64)
def passwd():
i = 0
while i < 4:
pass_attempt = getpass()
if password_check(pass_attempt) != True:
if i > 2:
print("\nPassword authentication failed. Aborting....")
quit()
print("\nPassword attempt failed. You have "+str(3-i)+" remaining attempts.")
i += 1
else:
break
def password_check(password_attempt):
if type(password_attempt) != str:
raise Exception("password input must be a string!")
# Uses a hard-coded password hash (insecure)
password_hash = '<PASSWORD>'
if hashlib.sha256(password_attempt.encode()).hexdigest() == password_hash:
return True
else:
return False
def bin_array(integer):
    # Returns a binary array representation of a positive integer
if type(integer)!= int or integer < 0:
raise Exception("input must be a positive integer!")
return [int(x) for x in bin(integer)[2:]]
class base_58(object):
def encode(self, num):
""" Returns num in a base64-encoded string"""
encode = ''
if (num < 0):
return ''
while (num >= base58_count):
mod = num % base58_count
encode = alphabet_58[mod] + encode
num = num // base58_count
if (num):
encode = alphabet_58[num] + encode
return encode
def decode(self, s):
"""Decodes the base58-encoded string s into an integer"""
decoded = 0
multi = 1
s = s[::-1]
for char in s:
decoded += multi * alphabet_58.index(char)
multi = multi * base58_count
return decoded
class base_64(object):
def encode(self, num):
""" Returns num in a base58-encoded string"""
encode = ''
if (num < 0):
return ''
while (num >= base64_count):
mod = num % base64_count
encode = alphabet_64[mod] + encode
num = num // base64_count
if (num):
encode = alphabet_64[num] + encode
padding = "="
return encode + padding
def decode(self, s):
if s[len(s)-1]!= '=':
raise Exception("Base64 encoded object not formatted correctly. String should end with '='.")
s = s[:len(s)-1]
decoded = 0
multi = 1
s = s[::-1]
for char in s:
decoded += multi * alphabet_64.index(char)
multi = multi * base64_count
return decoded
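# Illustrative round-trip sketch for the two encoders above (the _demo_ helper is a
# hypothetical example, not part of the module): both classes map a non-negative
# integer to a string and back again.
def _demo_base_encoding():
    n = 1234567890
    assert base_58().decode(base_58().encode(n)) == n
    assert base_64().decode(base_64().encode(n)) == n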
#Integer math
class intmath(object):
def mod_inv(self, num, modulus):
        # Finds the inverse of num modulo the given modulus; assumes num and modulus are coprime
if type(num) != int:
raise Exception("Inputs must be integer values")
        if num <= 0 or num > modulus:
            num = num % modulus
        if num == 1:
            return num
# Find gcd using Extended Euclid's Algorithm
gcd, x, y = intmath().extended_euclid_gcd(num, modulus)
# In case x is negative, we handle it by adding extra M
# Because we know that multiplicative inverse of A in range M lies
# in the range [0, M-1]
if x < 0:
x += modulus
return x
def extended_euclid_gcd(self, a, b):
"""
Returns a list `result` of size 3 where:
Referring to the equation ax + by = gcd(a, b)
result[0] is gcd(a, b)
result[1] is x
result[2] is y
"""
s = 0; old_s = 1
t = 1; old_t = 0
r = b; old_r = a
while r != 0:
quotient = old_r//r
# In Python, // operator performs integer or floored division
# This is a pythonic way to swap numbers
# See the same part in C++ implementation below to know more
old_r, r = r, old_r - quotient*r
old_s, s = s, old_s - quotient*s
old_t, t = t, old_t - quotient*t
return [old_r, old_s, old_t]
def sqrtmod(self, a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime and p must be equivalent to 3 mod 4.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
0 is returned if no square root exists for
these a and p.
        Only the simple case p = 3 (mod 4) is handled
        here (the root is then a^((p+1)/4) mod p);
        the general Tonelli-Shanks algorithm is not
        implemented and other primes raise an exception.
"""
# Simple cases
#
if p % 4 == 3:
power = (p+1) // 4
return pow(a, power , p)
elif a == 0:
return 0
elif p == 2:
return 0
elif intmath().legendre_symbol(a, p) != 1:
return 0
else:
raise Exception("exponent must be 0, 1, 2 or prime and equivalent to 3 mod 4")
def legendre_symbol(self, a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, int((p - 1) / 2), p)
return -1 if ls == p - 1 else ls
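# Illustrative check (hypothetical _demo_ helper): mod_inv returns the multiplicative
# inverse, so num * mod_inv(num, p) is congruent to 1 modulo the prime p.
def _demo_mod_inv():
    inv = intmath().mod_inv(12345, secp_p)
    assert (12345 * inv) % secp_p == 1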
# secp256k1
class libsecp(object):
def private_key(self):
return random.randint(1,secp_n)
def public_key(self, priv_key):
if priv_key > secp_n or priv_key < 0 or type(priv_key) != int:
raise Exception("Private key must be an integer between 1 and n.")
        return self.point_mul(priv_key, secp_G)
def public_key_hex(self, pubkey):
if type(pubkey[0]) != int or type(pubkey[1]) != int:
raise Exception("input must be valid (x,y) coordinate.")
if pubkey[0] > secp_p or pubkey[0] < 0 or pubkey[1] > secp_p or pubkey[1] < 0:
raise Exception("input must be valid secp256k1 element.")
pubkey_x = hex(pubkey[0])
pubkey_y = hex(pubkey[1])
result = '0x04' + str(pubkey_x[2:]).zfill(64) + str(pubkey_y[2:]).zfill(64)
return result
def compress_key(self, pub_key):
"""Takes a hexadecimal encoded or integer array public key
Returns a compressed public key '0x02....' or '0x03...' """
if type(pub_key) == str:
if len(pub_key) != 132:
raise Exception("Incorrect public key formatting.")
pub_key_x = int(pub_key[4:68], 16)
pub_key_y = int(pub_key[68:132], 16)
elif len(pub_key) == 2:
pub_key_x = pub_key[0]
pub_key_y = pub_key[1]
else:
raise Exception("incorrect public key formatting.")
if pub_key_x > secp_p or pub_key_x < 0 or pub_key_y > secp_p or pub_key_y < 0:
raise Exception("public key values outside the accepted range!")
if pub_key_y < secp_p // 2:
"""If the y-coordinate is less than (secp256k1) p then y is a "negative" EC point"""
pref = '02'
else:
pref = '03'
result = '0x' + pref + str(hex(pub_key_x)[2:])
return result
def decompress_key(self, comp_pub_key):
raise Exception("Not currently supported.")
"""Calculate the modular square root of x^3 + 7"""
if len(comp_pub_key) != 68:
raise Exception("public key must be a 32 byte string")
if comp_pub_key[0:4]!='0x02' and comp_pub_key[0:4]!='0x03':
raise Exception("Compressed key not formatted correctly!")
# Convert back to integer
pub_key_x = int(comp_pub_key[4:], 16)
rhs = (pow(pub_key_x,3) + secp_a*pub_key_x + secp_b) % secp_p
y_sol1 = intmath().sqrtmod(rhs, secp_p)
y_sol2 = (secp_p - y_sol1)
if pow(y_sol1, 2, secp_p) == rhs and pow(y_sol2, 2, secp_p) == rhs:
if comp_pub_key[0:4] == '0x02':
hex_y_neg = hex(min(y_sol1, y_sol2))
return '0x04' + str(comp_pub_key[4:]) + hex_y_neg[2:]
if comp_pub_key[0:4] == '0x03':
hex_y_pos = hex(max(y_sol1, y_sol2))
return '0x04' + str(comp_pub_key[4:]) + hex_y_pos[2:]
else:
raise Exception("Decompression Failed.")
def wif(self, priv_key):
prefix = "L"
base58_enc = base_58().encode(priv_key)
result = prefix + base58_enc
return result
def decode_wif(self, priv_key_string):
if type(priv_key_string) != str or priv_key_string[0] != 'L' or len(priv_key_string) > 50:
raise Exception("WIF private key not formatted correctly.")
priv_key = base_58().decode(priv_key_string[1:])
return priv_key
def point_double(self, A):
        return self.point_add(A, A)
def point_add(self, A, B):
# input 2 elliptic curve points
if A==secp_inf_point:
return B
if B == secp_inf_point:
return A
if len(A)!=2 or len(B)!=2:
raise Exception("public key must be an array of length 2!")
if type(A[0]) != int or type(B[0]) != int:
raise Exception("EC curve point must be an array of integers!")
if A[0] >= secp_n or A[0] < 0 or A[1] < 0 or A[1] >= secp_n:
raise Exception("input parameter 1 outside the accepted range!")
if B[0] >= secp_n or B[0] < 0 or B[1] < 0 or B[1] >= secp_n:
raise Exception("input parameter 2 outside the accepted range!")
# if A is not equal to B then use this formula
if A!=B:
C_x = (pow(B[1]-A[1],2,secp_p)*pow(intmath().mod_inv(B[0]-A[0],secp_p),2,secp_p) - A[0] - B[0]) % secp_p
C_y = ((B[1]-A[1])*intmath().mod_inv(B[0]-A[0],secp_p)*(A[0]-C_x) - A[1]) % secp_p
return [C_x, C_y]
# if A is equal to B then use this formula
if A==B:
C_x = (pow(3*pow(A[0],2,secp_p) + secp_a,2,secp_p)*pow(intmath().mod_inv(2*A[1],secp_p),2,secp_p) - 2*A[0]) % secp_p
            C_y = ((3*pow(A[0],2,secp_p) + secp_a)*intmath().mod_inv(2*A[1],secp_p)*(A[0] - C_x) - A[1]) % secp_p
return [C_x, C_y]
def point_mul(self, m, B):
if m == 0:
return secp_inf_point
if m == 1:
return B
if len(B)!=2:
raise Exception("public key must be an array of length 2!")
if type(m) != int or type(B[0]) != int:
raise Exception("EC curve point must be an array of integers!")
if m >= secp_n or m < 0:
raise Exception("Input parameter 1 outside the accepted range!")
if B[0] >= secp_n or B[0] < 0 or B[1] < 0 or B[1] >= secp_n:
raise Exception("Input parameter 2 outside the accepted range!")
m_bin_array = bin_array(m)
double_point = B
point_sum = secp_inf_point
for i in range(len(m_bin_array)):
if m_bin_array[len(m_bin_array)-i-1]==1:
point_sum = libsecp().point_add(double_point, point_sum)
            double_point = libsecp().point_add(double_point, double_point) # double the running point for the next bit (double-and-add)
return point_sum
def key_store(num_keys):
    store = []
    for i in range(num_keys):
        privkey = libsecp().private_key()
        pubkey = libsecp().public_key(privkey)
        wallet_key = libsecp().wif(privkey)
        compressedkey = libsecp().compress_key(pubkey)
        store.append([wallet_key, pubkey, compressedkey])
    return store
def symmetric_key():
return random.randint(1, sym_key_n)
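# Illustrative usage sketch (hypothetical _demo_ helper, assumes the point arithmetic
# above): derive a public key from a fresh private key, then print its uncompressed hex
# form, its compressed form, and the WIF-style encoding of the private key.
def _demo_keypair():
    priv = libsecp().private_key()
    pub = libsecp().public_key(priv)
    print(libsecp().public_key_hex(pub))
    print(libsecp().compress_key(pub))
    print(libsecp().wif(priv))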
|
StarcoderdataPython
|
5153741
|
<reponame>Tamlyn78/geo
from django.apps import AppConfig
class JobConfig(AppConfig):
name = 'job'
|
StarcoderdataPython
|
329378
|
import logging
import time
from datetime import datetime as dt
import pytz
from colorfield.fields import ColorField
from django.contrib.auth.models import Group
from django.contrib.gis.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.urls import reverse
from django.utils.timezone import now
from wx.enums import FlashTypeEnum
class BaseModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Decoder(BaseModel):
name = models.CharField(
max_length=40
)
description = models.CharField(
max_length=256
)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Country(BaseModel):
code = models.CharField(max_length=2)
name = models.CharField(max_length=256, unique=True)
class Meta:
verbose_name_plural = "countries"
def __str__(self):
return self.name
class Interval(BaseModel):
symbol = models.CharField(max_length=8, )
description = models.CharField(max_length=40)
default_query_range = models.IntegerField(default=0)
seconds = models.IntegerField(null=True)
class Meta:
ordering = ('symbol',)
def __str__(self):
return self.symbol
class PhysicalQuantity(BaseModel):
name = models.CharField(
max_length=16,
unique=True
)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class MeasurementVariable(BaseModel):
name = models.CharField(
max_length=40,
unique=True
)
physical_quantity = models.ForeignKey(
PhysicalQuantity,
on_delete=models.DO_NOTHING
)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class CodeTable(BaseModel):
name = models.CharField(
max_length=45,
unique=True
)
description = models.CharField(
max_length=256,
)
def __str__(self):
return self.name
class Unit(BaseModel):
symbol = models.CharField(
max_length=16,
unique=True
)
name = models.CharField(
max_length=256,
unique=True
)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class SamplingOperation(BaseModel):
"""Old sampling_operations table"""
symbol = models.CharField(
max_length=5,
unique=True
)
name = models.CharField(
max_length=40,
unique=True
)
class Meta:
ordering = ('symbol',)
def __str__(self):
return self.name
class Variable(BaseModel):
"""Old element table"""
variable_type = models.CharField(
max_length=40,
)
symbol = models.CharField(
max_length=8,
)
name = models.CharField(
max_length=40,
)
sampling_operation = models.ForeignKey(
SamplingOperation,
on_delete=models.DO_NOTHING,
null=True,
blank=True,
)
measurement_variable = models.ForeignKey(
MeasurementVariable,
on_delete=models.DO_NOTHING,
null=True,
blank=True,
)
unit = models.ForeignKey(
Unit,
on_delete=models.DO_NOTHING,
null=True,
blank=True,
)
precision = models.IntegerField(
null=True,
blank=True,
)
scale = models.IntegerField(
null=True,
blank=True,
)
code_table = models.ForeignKey(
CodeTable,
on_delete=models.DO_NOTHING,
null=True,
blank=True,
)
color = ColorField(default='#FF0000', null=True, blank=True)
range_min = models.IntegerField(
null=True,
blank=True,
)
range_max = models.IntegerField(
null=True,
blank=True,
)
default_representation = models.CharField(
max_length=60,
null=True,
blank=True,
default='line',
choices=[('line', 'Line'), ('point', 'Point'), ('bar', 'Bar'), ('column', 'Column')])
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class DataSource(BaseModel):
symbol = models.CharField(max_length=8, unique=True)
name = models.CharField(max_length=32, unique=True)
base_url = models.URLField(null=True)
location = models.CharField(max_length=256, null=True)
class Meta:
verbose_name = "data source"
verbose_name_plural = "data sources"
def __str__(self):
return self.name
class StationProfile(BaseModel):
name = models.CharField(max_length=45)
description = models.CharField(max_length=256)
color = models.CharField(max_length=7)
is_automatic = models.BooleanField(default=False)
is_manual = models.BooleanField(default=True)
class Meta:
verbose_name = "station profile"
verbose_name_plural = "station profiles"
def __str__(self):
return self.name
class AdministrativeRegionType(BaseModel):
name = models.CharField(max_length=45)
class Meta:
verbose_name = "administrative region type"
verbose_name_plural = "administrative region types"
def __str__(self):
return self.name
class AdministrativeRegion(BaseModel):
name = models.CharField(max_length=45)
country = models.ForeignKey(Country, on_delete=models.DO_NOTHING)
administrative_region_type = models.ForeignKey(AdministrativeRegionType, on_delete=models.DO_NOTHING)
class Meta:
verbose_name = "administrative region"
verbose_name_plural = "administrative regions"
def __str__(self):
return self.name
class StationType(BaseModel):
name = models.CharField(max_length=45)
description = models.CharField(max_length=256)
parent_type = models.ForeignKey('self', on_delete=models.DO_NOTHING, null=True)
class Meta:
verbose_name = "station type"
verbose_name_plural = "station types"
def __str__(self):
return self.name
class StationCommunication(BaseModel):
name = models.CharField(max_length=45)
description = models.CharField(max_length=256)
color = models.CharField(max_length=7)
class Meta:
verbose_name = "station communication"
verbose_name_plural = "station communications"
def __str__(self):
return self.description
class WMOStationType(BaseModel):
name = models.CharField(max_length=256, unique=True)
description = models.CharField(max_length=256, null=True, blank=True)
def __str__(self):
return self.name
class WMORegion(BaseModel):
name = models.CharField(max_length=256, unique=True)
description = models.CharField(max_length=256, null=True, blank=True)
def __str__(self):
return self.name
class WMOProgram(BaseModel):
name = models.CharField(max_length=256, unique=True)
description = models.CharField(max_length=256, null=True, blank=True)
def __str__(self):
return self.name
class Station(BaseModel):
name = models.CharField(max_length=256)
alias_name = models.CharField(max_length=256, null=True, blank=True)
begin_date = models.DateTimeField(null=True, blank=True)
end_date = models.DateTimeField(null=True, blank=True)
longitude = models.FloatField(validators=[
MinValueValidator(-180.), MaxValueValidator(180.)
])
latitude = models.FloatField(validators=[
MinValueValidator(-90.),
MaxValueValidator(90.)
])
elevation = models.FloatField(null=True, blank=True)
code = models.CharField(max_length=64)
wmo = models.IntegerField(
null=True,
blank=True
)
wigos = models.CharField(
null=True,
max_length=64,
blank=True
)
is_active = models.BooleanField(default=False)
is_automatic = models.BooleanField(default=True)
organization = models.CharField(
max_length=256,
null=True,
blank=True
)
observer = models.CharField(
max_length=256,
null=True,
blank=True
)
watershed = models.CharField(
max_length=256,
null=True,
blank=True
)
z = models.FloatField(
null=True,
blank=True
)
datum = models.CharField(
max_length=256,
null=True,
blank=True
)
zone = models.CharField(
max_length=256,
null=True,
blank=True
)
ground_water_province = models.CharField(
max_length=256,
null=True,
blank=True
)
river_code = models.IntegerField(
null=True,
blank=True
)
river_course = models.CharField(
max_length=64,
null=True,
blank=True
)
catchment_area_station = models.CharField(
max_length=256,
null=True,
blank=True
)
river_origin = models.CharField(
max_length=256,
null=True,
blank=True
)
easting = models.FloatField(
null=True,
blank=True
)
northing = models.FloatField(
null=True,
blank=True
)
river_outlet = models.CharField(
max_length=256,
null=True,
blank=True
)
river_length = models.IntegerField(
null=True,
blank=True
)
local_land_use = models.CharField(
max_length=256,
null=True,
blank=True
)
soil_type = models.CharField(
max_length=64,
null=True,
blank=True
)
site_description = models.CharField(
max_length=256,
null=True,
blank=True
)
land_surface_elevation = models.FloatField(
null=True,
blank=True
)
screen_length = models.FloatField(
null=True,
blank=True
)
top_casing_land_surface = models.FloatField(
null=True,
blank=True
)
depth_midpoint = models.FloatField(
null=True,
blank=True
)
screen_size = models.FloatField(
null=True,
blank=True
)
casing_type = models.CharField(
max_length=256,
null=True,
blank=True
)
casing_diameter = models.FloatField(
null=True,
blank=True
)
existing_gauges = models.CharField(
max_length=256,
null=True,
blank=True
)
flow_direction_at_station = models.CharField(
max_length=256,
null=True,
blank=True
)
flow_direction_above_station = models.CharField(
max_length=256,
null=True,
blank=True
)
flow_direction_below_station = models.CharField(
max_length=256,
null=True,
blank=True
)
bank_full_stage = models.CharField(
max_length=256,
null=True,
blank=True
)
bridge_level = models.CharField(
max_length=256,
null=True,
blank=True
)
access_point = models.CharField(
max_length=256,
null=True,
blank=True
)
temporary_benchmark = models.CharField(
max_length=256,
null=True,
blank=True
)
mean_sea_level = models.CharField(
max_length=256,
null=True,
blank=True
)
data_type = models.CharField(
max_length=256,
null=True,
blank=True
)
frequency_observation = models.CharField(
max_length=256,
null=True,
blank=True
)
historic_events = models.CharField(
max_length=256,
null=True,
blank=True
)
other_information = models.CharField(
max_length=256,
null=True,
blank=True
)
profile = models.ForeignKey(
StationProfile,
on_delete=models.DO_NOTHING,
null=True,
blank=True
)
hydrology_station_type = models.CharField(
max_length=64,
null=True,
blank=True
)
is_surface = models.BooleanField(default=True) # options are surface or ground
station_details = models.CharField(
max_length=256,
null=True,
blank=True
)
country = models.CharField(
max_length=256,
null=True,
blank=True
)
region = models.CharField(
max_length=256,
null=True,
blank=True
)
data_source = models.ForeignKey(
DataSource,
on_delete=models.DO_NOTHING,
null=True,
blank=True
)
communication_type = models.ForeignKey(
StationCommunication,
on_delete=models.DO_NOTHING,
null=True,
blank=True
)
utc_offset_minutes = models.IntegerField(
validators=[
MaxValueValidator(720),
MinValueValidator(-720)
])
alternative_names = models.CharField(
max_length=256,
null=True,
blank=True
)
wmo_station_type = models.ForeignKey(
WMOStationType,
on_delete=models.DO_NOTHING,
null=True,
blank=True
)
wmo_region = models.ForeignKey(
WMORegion,
on_delete=models.DO_NOTHING,
null=True,
blank=True
)
wmo_program = models.ForeignKey(
WMOProgram,
on_delete=models.DO_NOTHING,
null=True,
blank=True
)
wmo_station_plataform = models.CharField(
max_length=256,
null=True,
blank=True
)
operation_status = models.BooleanField(default=True)
class Meta:
unique_together = ('data_source', 'code')
ordering = ('name',)
def get_absolute_url(self):
"""Returns the url to access a particular instance of MyModelName."""
return reverse('station-detail', args=[str(self.id)])
def __str__(self):
return self.name + ' - ' + self.code
class StationVariable(BaseModel):
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
variable = models.ForeignKey(Variable, on_delete=models.DO_NOTHING)
first_measurement = models.DateTimeField(null=True, blank=True)
last_measurement = models.DateTimeField(null=True, blank=True)
last_value = models.FloatField(null=True, blank=True)
height = models.FloatField(null=True, blank=True)
test_range_min = models.FloatField(null=True, blank=True)
test_range_max = models.FloatField(null=True, blank=True)
test_step_min = models.FloatField(null=True, blank=True)
test_step_max = models.FloatField(null=True, blank=True)
test_persistence_variance = models.FloatField(null=True, blank=True)
test_persistence_interval = models.FloatField(null=True, blank=True)
test_spike_value = models.FloatField(null=True, blank=True)
last_data_datetime = models.DateTimeField(null=True, blank=True)
last_data_value = models.FloatField(null=True, blank=True)
last_data_code = models.CharField(max_length=60, null=True, blank=True)
class Meta:
unique_together = ("station", "variable")
ordering = ["station__id", "variable__id", ]
class QualityFlag(BaseModel):
symbol = models.CharField(max_length=8, unique=True)
name = models.CharField(max_length=256, unique=True)
color = ColorField(default='#FF0000', null=True, blank=True)
def __str__(self):
return self.name
def document_directory_path(instance, filename):
    # file will be uploaded to MEDIA_ROOT/documents/<station_code>_<timestamp>.<extension>
path_to_file = 'documents/{0}_{1}.{2}'.format(instance.station.code, time.strftime("%Y%m%d_%H%M%S"),
filename.split('.')[-1])
logging.info(f"Saving file {filename} in {path_to_file}")
return path_to_file
class Document(BaseModel):
alias = models.CharField(max_length=256, null=True)
file = models.FileField(upload_to=document_directory_path)
station = models.ForeignKey(Station, on_delete=models.CASCADE)
processed = models.BooleanField(default=False)
decoder = models.ForeignKey(Decoder, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return self.file.name
class DataFile(BaseModel):
ready_at = models.DateTimeField(null=True, blank=True)
ready = models.BooleanField(default=False)
initial_date = models.DateTimeField(null=True, blank=True)
final_date = models.DateTimeField(null=True, blank=True)
source = models.CharField(max_length=30, null=False, blank=False, default="Raw data")
lines = models.IntegerField(null=True, blank=True, default=None)
prepared_by = models.CharField(max_length=256, null=True, blank=True)
interval_in_seconds = models.IntegerField(null=True, blank=True)
def __str__(self):
return 'file ' + str(self.id)
class DataFileStation(BaseModel):
datafile = models.ForeignKey(DataFile, on_delete=models.CASCADE)
station = models.ForeignKey(Station, on_delete=models.CASCADE)
class DataFileVariable(BaseModel):
datafile = models.ForeignKey(DataFile, on_delete=models.CASCADE)
variable = models.ForeignKey(Variable, on_delete=models.CASCADE)
class StationFile(BaseModel):
name = models.CharField(max_length=256, null=True)
file = models.FileField(upload_to='station_files/%Y/%m/%d/')
station = models.ForeignKey(Station, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Format(BaseModel):
name = models.CharField(
max_length=40,
unique=True,
)
description = models.CharField(
max_length=256,
)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class VariableFormat(BaseModel):
variable = models.ForeignKey(
Variable,
on_delete=models.DO_NOTHING,
)
format = models.ForeignKey(
Format,
on_delete=models.DO_NOTHING,
)
interval = models.ForeignKey(
Interval,
on_delete=models.DO_NOTHING,
)
lookup_key = models.CharField(
max_length=255,
)
class Meta:
ordering = ['variable', 'format', ]
def __str__(self):
return '{} {}'.format(self.variable, self.format)
class PeriodicJobType(BaseModel):
name = models.CharField(
max_length=40,
unique=True,
)
description = models.CharField(
max_length=256,
)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class PeriodicJob(BaseModel):
periodic_job_type = models.ForeignKey(
PeriodicJobType,
on_delete=models.DO_NOTHING,
)
station = models.ForeignKey(
Station,
on_delete=models.DO_NOTHING,
)
is_running = models.BooleanField(
default=False,
)
last_record = models.IntegerField(
default=0,
)
class Meta:
ordering = ('station', 'periodic_job_type',)
class Watershed(models.Model):
watershed = models.CharField(max_length=128)
size = models.CharField(max_length=16)
acres = models.FloatField()
hectares = models.FloatField()
shape_leng = models.FloatField()
shape_area = models.FloatField()
geom = models.MultiPolygonField(srid=4326)
class District(models.Model):
id_field = models.IntegerField()
district = models.CharField(max_length=64)
acres = models.FloatField()
hectares = models.FloatField()
geom = models.MultiPolygonField(srid=4326)
class NoaaTransmissionType(models.Model):
acronym = models.CharField(max_length=5, unique=True)
description = models.CharField(max_length=255)
def __str__(self):
return self.acronym
class NoaaTransmissionRate(models.Model):
rate = models.IntegerField(unique=True)
def __str__(self):
return str(self.rate)
class NoaaDcp(BaseModel):
dcp_address = models.CharField(max_length=256)
first_channel = models.IntegerField(null=True, blank=True)
first_channel_type = models.ForeignKey(NoaaTransmissionType, on_delete=models.CASCADE,
related_name="first_channels", null=True, blank=True)
second_channel = models.IntegerField(null=True, blank=True)
second_channel_type = models.ForeignKey(NoaaTransmissionType, on_delete=models.CASCADE,
related_name="second_channels", null=True, blank=True)
first_transmission_time = models.TimeField()
transmission_window = models.TimeField()
transmission_period = models.TimeField()
last_datetime = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.dcp_address
class NoaaDcpsStation(BaseModel):
station = models.ForeignKey(Station, on_delete=models.CASCADE)
noaa_dcp = models.ForeignKey(NoaaDcp, on_delete=models.CASCADE)
decoder = models.ForeignKey(Decoder, on_delete=models.CASCADE)
interval = models.ForeignKey(Interval, on_delete=models.CASCADE)
format = models.ForeignKey(Format, on_delete=models.CASCADE)
start_date = models.DateTimeField()
end_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return "{} {} - {}".format(self.station, self.noaa_dcp, self.interval)
class Meta:
verbose_name = "NMS DCP Station"
verbose_name_plural = "NMS DCP Stations"
class Flash(BaseModel):
type = models.CharField(max_length=60, choices=[(flashType.name, flashType.value) for flashType in FlashTypeEnum])
datetime = models.DateTimeField()
latitude = models.FloatField()
longitude = models.FloatField()
peak_current = models.IntegerField()
ic_height = models.IntegerField()
num_sensors = models.IntegerField()
ic_multiplicity = models.IntegerField()
cg_multiplicity = models.IntegerField()
start_datetime = models.DateTimeField()
duration = models.IntegerField()
ul_latitude = models.FloatField()
ul_longitude = models.FloatField()
lr_latitude = models.FloatField()
lr_longitude = models.FloatField()
def __str__(self):
return "{} {} - {}".format(self.latitude, self.longitude, self.datetime)
class QcRangeThreshold(BaseModel):
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
variable = models.ForeignKey(Variable, on_delete=models.DO_NOTHING)
interval = models.IntegerField(null=True, blank=True)
range_min = models.FloatField(null=True, blank=True)
range_max = models.FloatField(null=True, blank=True)
month = models.IntegerField(default=1)
def __str__(self):
return f"station={self.station.code} variable={self.variable.symbol} interval={self.interval}"
class Meta:
ordering = ('station', 'variable', 'month', 'interval',)
unique_together = ("station", "variable", "month", "interval",)
class QcStepThreshold(BaseModel):
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
variable = models.ForeignKey(Variable, on_delete=models.DO_NOTHING)
interval = models.IntegerField(null=True, blank=True)
step_min = models.FloatField(null=True, blank=True)
step_max = models.FloatField(null=True, blank=True)
class Meta:
ordering = ('station', 'variable', 'interval')
unique_together = ("station", "variable", "interval")
class QcPersistThreshold(BaseModel):
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
variable = models.ForeignKey(Variable, on_delete=models.DO_NOTHING)
interval = models.IntegerField()
window = models.IntegerField()
minimum_variance = models.FloatField()
class Meta:
ordering = ('station', 'variable', 'interval')
unique_together = ("station", "variable", "interval")
class FTPServer(BaseModel):
name = models.CharField(max_length=64, unique=True)
host = models.CharField(max_length=256)
port = models.IntegerField()
username = models.CharField(max_length=128)
password = models.CharField(max_length=128)
is_active_mode = models.BooleanField()
def __str__(self):
return f'{self.name} - {self.host}:{self.port}'
class Meta:
unique_together = ('host', 'port', 'username', 'password')
class StationFileIngestion(BaseModel):
ftp_server = models.ForeignKey(FTPServer, on_delete=models.DO_NOTHING)
remote_folder = models.CharField(max_length=1024)
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
file_pattern = models.CharField(max_length=256)
decoder = models.ForeignKey(Decoder, on_delete=models.DO_NOTHING)
cron_schedule = models.CharField(max_length=64, default='15/15 * * * *')
utc_offset_minutes = models.IntegerField()
delete_from_server = models.BooleanField()
is_active = models.BooleanField(default=True)
is_binary_transfer = models.BooleanField(default=False)
is_historical_data = models.BooleanField(default=False)
override_data_on_conflict = models.BooleanField(default=False)
class Meta:
unique_together = ('ftp_server', 'remote_folder', 'station')
def __str__(self):
return f'{self.ftp_server} - {self.station}'
class StationDataFileStatus(BaseModel):
name = models.CharField(max_length=128)
def __str__(self):
return self.name
class Meta:
verbose_name = "station data file status"
verbose_name_plural = "station data file statuses"
class StationDataFile(BaseModel):
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
decoder = models.ForeignKey(Decoder, on_delete=models.DO_NOTHING)
status = models.ForeignKey(StationDataFileStatus, on_delete=models.DO_NOTHING)
utc_offset_minutes = models.IntegerField()
filepath = models.CharField(max_length=1024)
file_hash = models.CharField(max_length=128, db_index=True)
file_size = models.IntegerField()
observation = models.TextField(max_length=1024, null=True, blank=True)
is_historical_data = models.BooleanField(default=False)
override_data_on_conflict = models.BooleanField(default=False)
def __str__(self):
return f'{self.filepath}'
class HourlySummaryTask(BaseModel):
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
datetime = models.DateTimeField()
started_at = models.DateTimeField(null=True, blank=True)
finished_at = models.DateTimeField(null=True, blank=True)
class DailySummaryTask(BaseModel):
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
date = models.DateField()
started_at = models.DateTimeField(null=True, blank=True)
finished_at = models.DateTimeField(null=True, blank=True)
class DcpMessages(BaseModel):
# 8 hex digit DCP Address
noaa_dcp = models.ForeignKey(NoaaDcp, on_delete=models.DO_NOTHING)
# YYDDDHHMMSS – Time the message arrived at the Wallops receive station.
# The day is represented as a three digit day of the year (julian day).
datetime = models.DateTimeField()
# 1 character failure code
failure_code = models.CharField(max_length=1)
# 2 decimal digit signal strength
signal_strength = models.CharField(max_length=2)
# 2 decimal digit frequency offset
frequency_offset = models.CharField(max_length=2)
# 1 character modulation index
modulation_index = models.CharField(max_length=1)
# 1 character data quality indicator
data_quality = models.CharField(max_length=1)
# 3 decimal digit GOES receive channel
channel = models.CharField(max_length=3)
# 1 character GOES spacecraft indicator (‘E’ or ‘W’)
spacecraft_indicator = models.CharField(max_length=1)
# 2 character data source code Data Source Code Table
data_source = models.ForeignKey(DataSource, on_delete=models.DO_NOTHING)
# 5 decimal digit message data length
message_data_length = models.CharField(max_length=5)
payload = models.TextField()
def station(self):
return NoaaDcpsStation.objects.get(noaa_dcp=self.noaa_dcp).station
class Meta:
unique_together = ('noaa_dcp', 'datetime')
ordering = ('noaa_dcp', 'datetime')
@classmethod
def create(cls, header, payload):
# 5020734E20131172412G44+0NN117EXE00278
dcp_address = header[:8]
print(f"create header={header} dcp_address={dcp_address}")
noaa_dcp = NoaaDcp.objects.get(dcp_address=dcp_address)
datetime = pytz.utc.localize(dt.strptime(header[8:19], '%y%j%H%M%S'))
failure_code = header[19:20]
signal_strength = header[20:22]
frequency_offset = header[22:24]
modulation_index = header[24:25]
data_quality = header[25:26]
channel = header[26:29]
spacecraft_indicator = header[29:30]
data_source = header[30:32]
data_source_obj, created = DataSource.objects.get_or_create(symbol=data_source,
defaults={
'name': data_source,
'created_at': now(),
'updated_at': now()
})
message_data_length = header[32:37]
dcp_msg = cls(
noaa_dcp=noaa_dcp,
datetime=datetime,
failure_code=failure_code,
signal_strength=signal_strength,
frequency_offset=frequency_offset,
modulation_index=modulation_index,
data_quality=data_quality,
channel=channel,
spacecraft_indicator=spacecraft_indicator,
data_source=data_source_obj,
message_data_length=message_data_length,
payload=payload
)
return dcp_msg
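# Illustrative breakdown (hypothetical _demo_ helper, plain string slicing only, no
# database access) of the sample DCP header shown in create() above; the field widths
# mirror the comments on the model fields.
def _demo_dcp_header_fields(header='5020734E20131172412G44+0NN117EXE00278'):
    return {
        'dcp_address': header[:8],
        'datetime_yydddhhmmss': header[8:19],
        'failure_code': header[19:20],
        'signal_strength': header[20:22],
        'frequency_offset': header[22:24],
        'modulation_index': header[24:25],
        'data_quality': header[25:26],
        'channel': header[26:29],
        'spacecraft_indicator': header[29:30],
        'data_source': header[30:32],
        'message_data_length': header[32:37],
    }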
class RatingCurve(BaseModel):
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
start_date = models.DateTimeField()
def __str__(self):
return f'{self.station.name} - {self.start_date}'
class RatingCurveTable(BaseModel):
rating_curve = models.ForeignKey(RatingCurve, on_delete=models.DO_NOTHING)
h = models.FloatField()
q = models.FloatField()
class WxPermission(BaseModel):
name = models.CharField(max_length=256, unique=True)
url_name = models.CharField(max_length=256)
permission = models.CharField(max_length=32, choices=(
('read', 'Read'), ('write', 'Write'), ('update', 'Update'), ('delete', 'Delete')))
def __str__(self):
return self.name
class WxGroupPermission(BaseModel):
group = models.OneToOneField(Group, on_delete=models.DO_NOTHING)
permissions = models.ManyToManyField(WxPermission)
def __str__(self):
return self.group.name
class StationImage(BaseModel):
station = models.ForeignKey(Station, related_name='station_images', on_delete=models.CASCADE)
name = models.CharField(max_length=256)
path = models.FileField(upload_to='station_images/%Y/%m/%d/')
description = models.CharField(max_length=256, null=True, blank=True)
def __str__(self):
return self.name
class HydroMLPrediction(BaseModel):
name = models.CharField(max_length=256)
hydroml_prediction_id = models.IntegerField()
variable = models.ForeignKey(Variable, on_delete=models.DO_NOTHING)
class Meta:
unique_together = ('hydroml_prediction_id', 'variable')
def __str__(self):
return self.name
class HydroMLPredictionMapping(BaseModel):
hydroml_prediction = models.ForeignKey(HydroMLPrediction, on_delete=models.DO_NOTHING)
prediction_result = models.CharField(max_length=32)
quality_flag = models.ForeignKey(QualityFlag, on_delete=models.DO_NOTHING)
class Meta:
unique_together = ('hydroml_prediction', 'quality_flag')
def __str__(self):
return f'{self.prediction_result} - {self.quality_flag}'
class Neighborhood(BaseModel):
name = models.CharField(max_length=256, unique=True)
def __str__(self):
return self.name
class StationNeighborhood(BaseModel):
neighborhood = models.ForeignKey(Neighborhood, related_name='neighborhood_stations', on_delete=models.DO_NOTHING)
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
class Meta:
unique_together = ('neighborhood', 'station')
def __str__(self):
return f'{self.neighborhood.name} - {self.station}'
class HydroMLPredictionStation(BaseModel):
prediction = models.ForeignKey(HydroMLPrediction, on_delete=models.DO_NOTHING)
neighborhood = models.ForeignKey(Neighborhood, on_delete=models.DO_NOTHING)
target_station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
data_period_in_minutes = models.IntegerField()
interval_in_minutes = models.IntegerField()
def __str__(self):
return f'{self.prediction.name} - {self.neighborhood.name}'
class StationDataMinimumInterval(BaseModel):
datetime = models.DateTimeField()
station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)
variable = models.ForeignKey(Variable, on_delete=models.DO_NOTHING)
minimum_interval = models.TimeField(null=True, blank=True)
record_count = models.IntegerField()
ideal_record_count = models.IntegerField()
record_count_percentage = models.FloatField()
class Meta:
unique_together = ('datetime', 'station', 'variable')
def __str__(self):
return f'{self.datetime}: {self.station} - {self.variable}'
|
StarcoderdataPython
|
8001396
|
<gh_stars>0
from time import time
import torch
import numpy as np
from abc import ABC, abstractmethod
# tensorboard stuff
from torch.utils.tensorboard import SummaryWriter
# my libraries
from .util import utilities as util_
from . import losses as ls
from . import cluster
BACKGROUND_LABEL = 0
TABLE_LABEL = 1
OBJECTS_LABEL = 2
NUM_GPUS = torch.cuda.device_count()
def smart_random_sample_indices(X, Y, num_seeds):
""" Helper function to sample seeds for mean shift training
        @param X: a [N x 3] torch.FloatTensor of predicted centers
@param Y: a [N] torch.LongTensor with values in {2, ... K+1}
@param num_seeds: int
"""
unique_obj_labels = torch.unique(Y)
num_objects = unique_obj_labels.shape[0]
indices = torch.zeros(0, dtype=torch.long, device=X.device)
num_seeds_per_obj = int(np.ceil(num_seeds / num_objects))
for k in unique_obj_labels:
label_indices = torch.where(Y == k)[0]
randperm = torch.randperm(label_indices.shape[0])
inds = label_indices[randperm[:num_seeds_per_obj]]
indices = torch.cat([indices, inds], dim=0)
X_I = X[indices, :] # Shape: [num_seeds x 3]
Y_I = Y[indices] # Shape: [num_seeds]
return X_I, Y_I
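# Illustrative usage sketch (hypothetical _demo_ helper, CPU tensors): sample roughly
# num_seeds seed points, balanced across the object labels present in Y.
def _demo_seed_sampling():
    X = torch.randn(100, 3)
    Y = torch.randint(2, 5, (100,))  # labels in {2, 3, 4}, matching the docstring convention
    X_I, Y_I = smart_random_sample_indices(X, Y, num_seeds=9)
    print(X_I.shape, Y_I.shape)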
def hill_climb_one_iter(Z, X, sigmas):
""" Runs one iteration of GBMS hill climbing algorithm
The seeds climb the distribution given by the KDE of X
Note: X is not edited by this method
@param Z: a [m x d] torch.FloatTensor of seeds
@param X: a [n x d] torch.FloatTensor of d-dim unit vectors
@param sigmas: a [1 x n] torch.FloatTensor of sigmas OR a Python float
"""
W = cluster.gaussian_kernel(Z, X, sigmas) # Shape: [m x n]
Q = W / W.sum(dim=1, keepdim=True) # Shape: [m x n]
Z = torch.mm(Q, X)
return Z
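# Self-contained illustration of the same GBMS update (hypothetical _demo_ helper): the
# Gaussian kernel is written out explicitly here on the assumption that
# cluster.gaussian_kernel computes exp(-||z - x||^2 / (2 * sigma^2)).
def _demo_gbms_update():
    torch.manual_seed(0)
    X = torch.randn(50, 3)      # data points defining the KDE
    Z = X[:5].clone()           # seeds that climb the density
    sigma = 0.5
    W = torch.exp(-torch.cdist(Z, X) ** 2 / (2 * sigma ** 2))  # [m x n] kernel weights
    Q = W / W.sum(dim=1, keepdim=True)                         # normalise per seed
    return torch.mm(Q, X)                                      # one hill-climbing step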
class Trainer(ABC):
def __init__(self, model_wrapper, config):
self.model_wrapper = model_wrapper
self.device = self.model_wrapper.device
self.config = config
self.setup()
@abstractmethod
def setup(self):
pass
@abstractmethod
def train(self):
pass
def save(self, name=None, save_dir=None):
""" Save optimizer state, epoch/iter nums, loss information
Also save model state
"""
# Save optimizer stuff
checkpoint = {'iter_num': self.iter_num, 'epoch_num': self.epoch_num, 'infos': self.infos,
'optimizer': self.optimizer.state_dict()}
if save_dir is None:
save_dir = self.config['tb_directory']
if name is None:
filename = save_dir + self.__class__.__name__ + '_' \
+ self.model_wrapper.__class__.__name__ \
+ '_iter' + str(self.iter_num) \
+ '_checkpoint.pth'
else:
filename = save_dir + name + '_checkpoint.pth'
torch.save(checkpoint, filename)
# Save model stuff
filename = save_dir + self.model_wrapper.__class__.__name__ \
+ '_iter' + str(self.iter_num) \
+ '_' + str(self.model_wrapper.config['feature_dim']) + 'c' \
+ '_checkpoint.pth'
self.model_wrapper.save(filename)
def load(self, opt_filename, model_filename):
""" Load optimizer state, epoch/iter nums, loss information
Also load model state
"""
# Load optimizer stuff
checkpoint = torch.load(opt_filename)
self.optimizer.load_state_dict(checkpoint['optimizer'])
print(f"Loaded optimizer")
self.iter_num = checkpoint['iter_num']
self.epoch_num = checkpoint['epoch_num']
self.infos = checkpoint['infos']
# Load model stuff
self.model_wrapper.load(model_filename)
class DSNTrainer(Trainer):
def setup(self):
# Initialize stuff
self.epoch_num = 1
self.iter_num = 1
self.infos = dict()
# Initialize optimizer
model_config = self.model_wrapper.model.parameters()
self.optimizer = torch.optim.Adam(model_config, lr=self.config['lr'])
if self.config['load']:
self.load(self.config['opt_filename'], self.config['model_filename'])
# Losses
foreground_loss = ls.CELossWeighted(weighted=True)
center_offset_loss = ls.SmoothL1LossWeighted(weighted=True)
separation_loss = ls.CELossWeightedMasked(weighted=True)
cluster_loss = ls.ClusterLossWeighted(self.config['delta'], weighted=True)
self.losses = {
'fg_loss': foreground_loss,
'co_loss': center_offset_loss,
'sep_loss': separation_loss,
'cl_loss': cluster_loss,
}
# Tensorboard stuff
self.tb_writer = SummaryWriter(self.config['tb_directory'],
flush_secs=self.config['flush_secs'])
def train(self, num_epochs, data_loader):
# Some stuff to keep track of
batch_time = util_.AverageMeter()
data_time = util_.AverageMeter()
total_losses = util_.AverageMeter()
fg_losses = util_.AverageMeter()
center_offset_losses = util_.AverageMeter()
cluster_losses = util_.AverageMeter()
separation_losses = util_.AverageMeter()
end = time()
# Training mode
self.model_wrapper.train_mode()
for epoch_iter in range(num_epochs):
for i, batch in enumerate(data_loader):
if self.iter_num >= self.config['max_iters']:
print("Reached maximum number of iterations...")
break
# Send everything to GPU
self.model_wrapper.send_batch_to_device(batch)
# Get labels
foreground_labels = batch['foreground_labels'] # Shape: [N x H x W]
center_offset_labels = batch['center_offset_labels'] # Shape: [N x 2 x H x W]
object_centers = batch['object_centers'] # Shape: [N x 100 x 3]
# measure data loading time
data_time.update(time() - end)
N, H, W = foreground_labels.shape
# This is (potentially) in parallel
fg_logits, center_offsets = self.model_wrapper.model(batch['xyz'])
### Foreground Loss ###
fg_masks = foreground_labels.clamp(0, 2).long()
fg_loss = self.losses['fg_loss'](fg_logits, fg_masks)
### Center Prediction Loss ###
center_offset_loss = self.losses['co_loss'](center_offsets, center_offset_labels, foreground_labels)
separation_loss = torch.tensor(0.).to(self.device)
cluster_loss = torch.tensor(0.).to(self.device)
L = self.config['max_GMS_iters']
for j in range(N):
fg_mask_j = fg_masks[j] == OBJECTS_LABEL
if torch.sum(fg_mask_j) == 0:
continue
### Separation Loss ###
predicted_centers = batch['xyz'][j] + center_offsets[j]
# Cross-Entropy Loss for M. Only run on FG pixels
gt_centers = object_centers[j, :batch['num_3D_centers'][j]]
M_logits = self.model_wrapper.construct_M_logits(predicted_centers, gt_centers)
M_gt = ls.create_M_GT(foreground_labels[j])
separation_loss = separation_loss + \
self.losses['sep_loss'](M_logits.unsqueeze(0),
M_gt.unsqueeze(0),
foreground_labels[j].unsqueeze(0))
### Cluster loss ###
# Note: the meanshift is spread across GPUs to spread memory across
X = predicted_centers.permute(1,2,0)
X_fg = X[fg_mask_j][::self.model_wrapper.config['subsample_factor'], ...] # Shape: [num_fg_pixels//subsample_factor x 3]
Y_fg = foreground_labels[j, fg_mask_j][::self.model_wrapper.config['subsample_factor']] # Shape: [num_fg_pixels//subsample_factor]
X_fg = X_fg.to(f'cuda:{j % NUM_GPUS}'); Y_fg = Y_fg.to(f'cuda:{j % NUM_GPUS}')
X_I, Y_I = smart_random_sample_indices(X_fg, Y_fg, self.config['num_seeds_training'])
for l in range(L):
X_I = hill_climb_one_iter(X_I, X_fg, self.model_wrapper.config['sigma'])
cluster_loss = cluster_loss + self.losses['cl_loss'](X_I, Y_I, X_fg, Y_fg).to(self.device)
# Weight the frame-wise losses by batch size
separation_loss = separation_loss / N
cluster_loss = cluster_loss / (N * L)
# Total loss. Note: foreground loss is always computed/backpropagated
loss = self.config['lambda_fg'] * fg_loss + \
self.config['lambda_co'] * center_offset_loss + \
self.config['lambda_sep'] * separation_loss + \
self.config['lambda_cl'] * cluster_loss
### Gradient descent ###
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# measure accuracy and record loss
total_losses.update(loss.item(), N)
fg_losses.update(fg_loss.item(), N)
center_offset_losses.update(center_offset_loss.item(), N)
cluster_losses.update(cluster_loss.item(), N)
separation_losses.update(separation_loss.item(), N)
# Record some information about this iteration
batch_time.update(time() - end)
end = time()
# Record information every x iterations
if self.iter_num % self.config['iter_collect'] == 0:
info = {'iter_num': self.iter_num,
'Batch Time': round(batch_time.avg, 3),
'Data Time': round(data_time.avg, 3),
'loss': round(total_losses.avg, 7),
'FG loss': round(fg_losses.avg, 7),
'Center Offset loss': round(center_offset_losses.avg, 7),
'Cluster loss': round(cluster_losses.avg, 7),
'Separation loss' : round(separation_losses.avg, 7),
}
self.infos[self.iter_num] = info
# Tensorboard stuff
self.tb_writer.add_scalar('Total Loss', info['loss'], self.iter_num)
self.tb_writer.add_scalar('Loss/Foreground', info['FG loss'], self.iter_num)
self.tb_writer.add_scalar('Loss/Center Offset', info['Center Offset loss'], self.iter_num)
self.tb_writer.add_scalar('Loss/Cluster', info['Cluster loss'], self.iter_num)
self.tb_writer.add_scalar('Loss/Separation', info['Separation loss'], self.iter_num)
self.tb_writer.add_scalar('Time/per iter', info['Batch Time'], self.iter_num)
self.tb_writer.add_scalar('Time/data fetch', info['Data Time'], self.iter_num)
# Reset meters
batch_time = util_.AverageMeter()
data_time = util_.AverageMeter()
total_losses = util_.AverageMeter()
fg_losses = util_.AverageMeter()
center_offset_losses = util_.AverageMeter()
cluster_losses = util_.AverageMeter()
separation_losses = util_.AverageMeter()
end = time()
self.iter_num += 1
self.epoch_num += 1
class RRNTrainer(Trainer):
def setup(self):
# Initialize stuff
self.epoch_num = 1
self.iter_num = 1
self.infos = dict()
# Initialize optimizer
model_config = self.model_wrapper.model.parameters()
self.optimizer = torch.optim.Adam(model_config, lr=self.config['lr'])
if self.config['load']:
self.load(self.config['opt_filename'], self.config['model_filename'])
# Losses
foreground_loss = ls.BCEWithLogitsLossWeighted(weighted=True)
self.losses = {
'fg_loss' : foreground_loss,
}
# Tensorboard stuff
self.tb_writer = SummaryWriter(self.config['tb_directory'],
flush_secs=self.config['flush_secs'])
def train(self, num_epochs, data_loader):
# Some stuff to keep track of
batch_time = util_.AverageMeter()
data_time = util_.AverageMeter()
total_losses = util_.AverageMeter()
end = time()
# Training mode
self.model_wrapper.train_mode()
for epoch_iter in range(num_epochs):
for i, batch in enumerate(data_loader):
if self.iter_num >= self.config['max_iters']:
print("Reached maximum number of iterations...")
break
# Send everything to GPU
self.model_wrapper.send_batch_to_device(batch)
# Get labels
labels = batch['labels'].float() # Shape: [N x H x W]
# measure data loading time
data_time.update(time() - end)
N, H, W = labels.shape
# Apply the model
logits = self.model_wrapper.model(batch) # Shape: [N x 3 x H x W]
# Apply the loss
loss = self.losses['fg_loss'](logits, labels)
### Gradient descent ###
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# measure accuracy and record loss
total_losses.update(loss.item(), N)
# Record some information about this iteration
batch_time.update(time() - end)
end = time()
# Record information every x iterations
if self.iter_num % self.config['iter_collect'] == 0:
info = {'iter_num': self.iter_num,
'Batch Time': round(batch_time.avg, 3),
'Data Time': round(data_time.avg, 3),
'loss': round(total_losses.avg, 7),
}
self.infos[self.iter_num] = info
# Tensorboard stuff
self.tb_writer.add_scalar('Total Loss', info['loss'], self.iter_num)
self.tb_writer.add_scalar('Time/per iter', info['Batch Time'], self.iter_num)
self.tb_writer.add_scalar('Time/data fetch', info['Data Time'], self.iter_num)
# Reset meters
batch_time = util_.AverageMeter()
data_time = util_.AverageMeter()
total_losses = util_.AverageMeter()
end = time()
self.iter_num += 1
self.epoch_num += 1
|
StarcoderdataPython
|
339675
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
"""Auto diff transformation."""
from enum import Enum
from typing import Iterable, List, Mapping, Optional, Tuple
import popart._internal.ir as _ir
from popart.ir.graph import Graph
from popart.ir.tensor import Tensor
from popart.ir.ops.call import CallInfo
__all__ = [
'autodiff', 'get_expected_forward_inputs_from_call',
'ExpectedConnectionType', 'ExpectedConnection', 'GradGraphInfo'
]
class ExpectedConnectionType(Enum):
Fwd = "Fwd"
FwdGrad = "FwdGrad"
@classmethod
def _from_pb(cls, type_: _ir.ExpectedConnectionType):
if type_ == _ir.ExpectedConnectionType.Fwd:
return ExpectedConnectionType.Fwd
return ExpectedConnectionType.FwdGrad
def _to_pb(self):
if self == ExpectedConnectionType.Fwd:
return _ir.ExpectedConnectionType.Fwd
return _ir.ExpectedConnectionType.FwdGrad
class ExpectedConnection:
def __init__(self) -> None:
self._fwd_graph: _ir.Graph
self._pb_ec: _ir.ExpectedConnection
raise TypeError(
"ExpectedConnection should not be constructed directly. Use ExpectedConnection._from_pb instead."
)
@classmethod
def _from_pb(cls, fwd_graph: _ir.Graph,
pb_ec: _ir.ExpectedConnection) -> "ExpectedConnection":
self = super().__new__(cls)
self._fwd_graph = fwd_graph
self._pb_ec = pb_ec
return self
def __repr__(self) -> str:
return f"({self.connection_type}, {self.fwd_tensor})"
@property
def fwd_tensor(self) -> Tensor:
return Tensor._from_pb_tensor(
self._fwd_graph.getTensor(self._pb_ec.fwdId))
@property
def connection_type(self):
return ExpectedConnectionType._from_pb(self._pb_ec.type)
class GradGraphInfo:
"""The result of differentiating a graph.
`graph` is the computational graph for computing the gradient
`expected_inputs` are Tensors from the forward_graph
that are required as inputs to the grad `graph`
`expected_outputs` are Tensors from the forward_graph that have gradients as
outputs of the grad `graph`."""
def __init__(self) -> None:
self._ir: _ir.Ir
self._fwd_graph: _ir.Graph
self._expected_inputs: List[ExpectedConnection]
self._expected_outputs: List[ExpectedConnection]
self._pb_bwd_info: _ir.BwdGraphInfo
raise TypeError(
"GradGraphInfo should not be constructed directly. Use GradGraphInfo._from_pb instead."
)
@classmethod
def _from_pb(cls, ir: _ir.Ir, fwd_graph: _ir.Graph,
_pb_bwd_info: _ir.BwdGraphInfo) -> "GradGraphInfo":
self = super().__new__(cls)
self._ir = ir
self._fwd_graph = fwd_graph
self._expected_inputs = [
ExpectedConnection._from_pb(fwd_graph, e)
for e in _pb_bwd_info.expectedInputs
]
self._expected_outputs = [
ExpectedConnection._from_pb(fwd_graph, e)
for e in _pb_bwd_info.expectedOutputs
]
self._pb_bwd_info = _pb_bwd_info
return self
@property
def graph(self):
"""The computational graph for computing the gradient"""
_id = self._pb_bwd_info.bwdGraphId
return Graph._from_pb(self._ir.getGraph(_id))
@property
def forward_graph(self):
return Graph._from_pb(self._fwd_graph)
@property
def expected_inputs(self) -> Tuple[ExpectedConnection, ...]:
"""Tensors (or their gradients) from the forward_graph
that are required as inputs to the gradient graph"""
return tuple(self._expected_inputs)
@property
def expected_outputs(self) -> Tuple[ExpectedConnection, ...]:
"""Tensors from the forward_graph that have gradients as
outputs of the gradient graph"""
return tuple(self._expected_outputs)
def get_input_tensors(self) -> Tuple[Tensor, ...]:
return tuple(map(lambda ec: ec.fwd_tensor, self._expected_inputs))
def get_output_tensors(self) -> Tuple[Tensor, ...]:
return tuple(map(lambda ec: ec.fwd_tensor, self._expected_outputs))
def autodiff(graph: Graph,
grads_provided: Optional[Iterable[Tensor]] = None,
grads_required: Optional[Iterable[Tensor]] = None,
called_graphs_grad_info: Optional[
Mapping[Graph, GradGraphInfo]] = None,
return_all_grad_graphs: bool = False):
"""Differentiate a Graph.
The graph will be differentiated using the chain rule starting from `grads_provided`.
The outputs of the returned graph will be the gradient of the Tensors in `grads_required`.
    By default `grads_provided` will be all of the outputs of the forward graph and `grads_required` will
be all of the inputs to the forward graph.
Any Tensors in the forward graph that are needed to compute the gradients will be added as outputs
to the forward graph (if not already an input/output).
The returned `GradGraphInfo` contains the gradient graph and information regarding all required inputs
to the gradient graph. This can include tensors which are outputs of the forward graph `ExpectedConnectionType.Fwd`,
    or a gradient of an output of the forward graph `ExpectedConnectionType.FwdGrad`.
Any graphs called in the forward graph will recursively have `autodiff` called on it. Arg `called_graphs_grad_info` can be
used to specify the result of `autodiff` on a called graph that has already been differentiated.
By default GradGraphInfo will only be returned for the provided forward graph. Arg `return_all_grad_graphs` can be set to `True` to return
    info on all graphs that `autodiff` has been executed on as a result of this transformation.
    Args:
        graph (pir.Graph): The graph to differentiate.
        grads_provided (Optional[Iterable[pir.Tensor]], optional): Defaults to all outputs of the provided graph.
        grads_required (Optional[Iterable[pir.Tensor]], optional): Defaults to all inputs of the provided graph.
        called_graphs_grad_info (Optional[Mapping[pir.Graph, GradGraphInfo]], optional): Defaults to None.
        return_all_grad_graphs (bool, optional): Defaults to False.
Returns:
grad_info: GradGraphInfo
"""
grads_provided = graph.get_output_tensors(
) if grads_provided is None else grads_provided
grads_required = graph.get_input_tensors(
) if grads_required is None else grads_required
called_graphs_grad_info = {} if called_graphs_grad_info is None else called_graphs_grad_info
_pb_ir = graph.ir()._pb_ir
transform = _ir.transforms.Autodiff()
_pb_result = transform.apply(
_pb_ir, _ir.GraphId(graph.name), [t.id for t in grads_provided],
_ir.OptionalTensors([t.id for t in grads_required]),
{k: v._pb_bwd_info
for k, v in called_graphs_grad_info.items()})
result: Mapping[Graph, GradGraphInfo] = {}
for k, v in _pb_result.items():
_graph = Graph._from_pb(_pb_ir.getGraph(k))
result[_graph] = GradGraphInfo._from_pb(_pb_ir, _graph._pb_graph, v)
if return_all_grad_graphs:
return result
return result[graph]
def get_expected_forward_inputs_from_call(
call_info: CallInfo,
grad_info: GradGraphInfo) -> Mapping[Tensor, Tensor]:
"""Utility function for constructing inputs to calling a grad graph.
Args:
call_info: `popart.ir.ops.call.CallInfo`
Callsite info of a call to the graph that was auto-differentiated. This can be accessed by
using `ops.call_with_info()`
grad_info: `GradGraphInfo`
Output of autodiff on a graph.
Returns: `Mapping[Tensor, Tensor]`
from: a Tensor in the gradient Graph
to: an input or output tensor at a callsite of the corresponding forward Graph.
"""
if call_info.called_graph != grad_info.forward_graph:
raise TypeError(
"The called graph does not match the graph that was auto-differentiated."
)
return {
Tensor._from_pb_tensor(grad_info.graph._pb_graph.getInputTensor(idx)):
call_info.subgraph_to_op_tensor(act.fwd_tensor)
for idx, act in enumerate(grad_info.expected_inputs)
if act.connection_type == ExpectedConnectionType.Fwd
}
|
StarcoderdataPython
|
4968413
|
import re
class EncodedStringFilter:
def __init__(self, \
min_encoded_string_length_inclusive=1, \
max_encoded_string_length_exclusive=1000, \
encoded_string_ignore_filter_regex="^$"):
if min_encoded_string_length_inclusive < 0:
raise ValueError("""min_encoded_string_length_inclusive must be greater than
or equal to zero""")
if max_encoded_string_length_exclusive <= min_encoded_string_length_inclusive:
raise ValueError("""max_encoded_string_length_exclusive must be greater than
or equal to min_encoded_string_length_inclusive""")
self.length_range = range( \
min_encoded_string_length_inclusive, \
max_encoded_string_length_exclusive)
self.pattern = re.compile(encoded_string_ignore_filter_regex)
def should_evaluate_encoded_string(self, encoded_string):
return (len(encoded_string) in self.length_range) and \
(not self.pattern.match(encoded_string))
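

# Small usage sketch added for illustration (not part of the original module):
# only strings whose length falls in [min, max) and that do not match the
# ignore-filter regex are evaluated.
if __name__ == "__main__":
    string_filter = EncodedStringFilter(
        min_encoded_string_length_inclusive=2,
        max_encoded_string_length_exclusive=10,
        encoded_string_ignore_filter_regex="^skip")
    print(string_filter.should_evaluate_encoded_string("abcd"))       # True
    print(string_filter.should_evaluate_encoded_string("skip-this"))  # False: matches the ignore regex
    print(string_filter.should_evaluate_encoded_string("x"))          # False: shorter than the minimum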
|
StarcoderdataPython
|
252089
|
<reponame>Zazmuz/random_programs
import time
# God---------------------------------------------------------------------------------------
#
# try_out = ["In the beginning", "God created the heavens and the earth"]
# timed_try = 0
# holy_number = 3-0.333-0.333-0.333-0.333-0.333
# repeat = 0
# while True:
# timed_try -= timed_try
# time.sleep(holy_number)
# print(try_out[timed_try])
#
# timed_try += 1
# time.sleep(holy_number)
# print(try_out[timed_try])
# repeat += 1
# if repeat == 3:
# while True:
# print("Kneel before your king!")
#
# Counter---------------------------------------------------------------------------------
# counting_seconds = 2450
# counting_seconds = 0
# counting_minutes = 0
# counting_hours = 0
# while True:
# if counting_seconds >= 60:
# while counting_seconds >= 60:
# counting_seconds -= 60
# counting_minutes += 1
# if counting_minutes >= 60:
# while counting_minutes >= 60:
# counting_minutes -= 60
# counting_hours += 1
# print("\n secs:" + str(counting_seconds) + "\n minutes: " + str(counting_minutes) + "\n hours: " + str(counting_hours) + "\n")
# counting_seconds += 1
# time.sleep(1)
# Fibonacci--------------------------------------------------------------------------------
# def fibonacci_finder(repeats):
# fibonacci_sequence = None
# fibonacci_number_1 = 0
# fibonacci_number_2 = 1
# skipper = 0
# for i in range(repeats):
# if skipper == 1:
# fibonacci_sequence = fibonacci_number_1 + fibonacci_number_2
# fibonacci_number_2 = fibonacci_number_1
# fibonacci_number_1 = fibonacci_sequence
# else:
# skipper = 1
# fibonacci_sequence = 0
#     print("The " + str(repeats) + "th number of the fibonacci sequence is: " + str(fibonacci_sequence))
#
#
# def fibonacci_checker(number):
# fibonacci_sequence = None
# fibonacci_number_1 = 0
# fibonacci_number_2 = 1
# no_overly_large_numbers_are_ok = 0
# skipper = 0
# while True:
# if skipper == 1:
# fibonacci_sequence = fibonacci_number_1 + fibonacci_number_2
# fibonacci_number_2 = fibonacci_number_1
# fibonacci_number_1 = fibonacci_sequence
# if fibonacci_sequence == number:
# print("Your number is a fibonacci number!")
# break
# if no_overly_large_numbers_are_ok > 5000:
#             print("Your number is not a fibonacci number! :(")
# break
# no_overly_large_numbers_are_ok += 1
# else:
# skipper = 1
# fibonacci_sequence = 0
# fibonacci_checker(int(input()))
# Calculator----------------------------------------------------------------------------
number_one = None
number_two = None
method = None
answer = None
def plus(number_one, number_two):
answer = number_one + number_two
return answer
def minus(number_one, number_two):
answer = number_one - number_two
return answer
def division(number_one, number_two):
answer = number_one / number_two
return answer
def multiplication(number_one, number_two):
answer = number_one * number_two
return answer
number_one = int(input("What is your first number?: "))
number_two = int(input("What is your second number?: "))
method = input("Choose a calculation method: + - * /: ")
if method == "+":
print(plus(number_one, number_two))
if method == "-":
print(minus(number_one, number_two))
if method == "/":
print(division(number_one, number_two))
if method == "*":
print(multiplication(number_one, number_two))
|
StarcoderdataPython
|
5124819
|
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from six import moves
import six.moves.urllib.parse as urlparse
import sqlalchemy as sa
from ceilometer.i18n import _LE, _LI
from ceilometer import service
from ceilometer import storage
LOG = log.getLogger(__name__)
def upgrade():
conf = cfg.ConfigOpts()
conf.register_cli_opts([
cfg.BoolOpt('skip-metering-database',
help='Skip metering database upgrade.',
default=False),
cfg.BoolOpt('skip-event-database',
help='Skip event database upgrade.',
default=False),
cfg.BoolOpt('skip-gnocchi-resource-types',
help='Skip gnocchi resource-types upgrade.',
default=False),
])
service.prepare_service(conf=conf)
if conf.skip_metering_database:
LOG.info("Skipping metering database upgrade")
else:
LOG.debug("Upgrading metering database")
storage.get_connection_from_config(conf, 'metering').upgrade()
if conf.skip_event_database:
LOG.info("Skipping event database upgrade")
else:
LOG.debug("Upgrading event database")
storage.get_connection_from_config(conf, 'event').upgrade()
if conf.skip_gnocchi_resource_types:
LOG.info("Skipping Gnocchi resource types upgrade")
else:
LOG.debug("Upgrading Gnocchi resource types")
from ceilometer import gnocchi_client
gnocchi_client.upgrade_resource_types(conf)
def expirer():
conf = service.prepare_service()
if conf.database.metering_time_to_live > 0:
LOG.debug("Clearing expired metering data")
storage_conn = storage.get_connection_from_config(conf, 'metering')
storage_conn.clear_expired_metering_data(
conf.database.metering_time_to_live)
else:
LOG.info(_LI("Nothing to clean, database metering time to live "
"is disabled"))
if conf.database.event_time_to_live > 0:
LOG.debug("Clearing expired event data")
event_conn = storage.get_connection_from_config(conf, 'event')
event_conn.clear_expired_event_data(
conf.database.event_time_to_live)
else:
LOG.info(_LI("Nothing to clean, database event time to live "
"is disabled"))
def db_clean_legacy():
conf = cfg.ConfigOpts()
conf.register_cli_opts([
        cfg.StrOpt('confirm-drop-alarm-table',
short='n',
help='confirm to drop the legacy alarm tables')])
    service.prepare_service(conf=conf)
    if not conf.confirm_drop_alarm_table:
        confirm = moves.input("Do you really want to drop the legacy alarm "
                              "tables? This will destroy the data permanently "
                              "if it exists. Please type 'YES' to confirm: ")
        if confirm != 'YES':
            print("DB legacy cleanup aborted!")
            return
for purpose in ['metering', 'event']:
url = (getattr(conf.database, '%s_connection' % purpose) or
conf.database.connection)
parsed = urlparse.urlparse(url)
if parsed.password:
masked_netloc = '****'.join(parsed.netloc.rsplit(parsed.password))
masked_url = parsed._replace(netloc=masked_netloc)
masked_url = urlparse.urlunparse(masked_url)
else:
masked_url = url
LOG.info(_LI('Starting to drop alarm and alarm history tables in '
'%(purpose)s backend: %(url)s'), {
'purpose': purpose, 'url': masked_url})
connection_scheme = parsed.scheme
conn = storage.get_connection_from_config(conf, purpose)
if connection_scheme in ('mysql', 'mysql+pymysql', 'postgresql',
'sqlite'):
engine = conn._engine_facade.get_engine()
meta = sa.MetaData(bind=engine)
for table_name in ['alarm', 'alarm_history']:
if engine.has_table(table_name):
alarm = sa.Table(table_name, meta, autoload=True)
alarm.drop()
LOG.info(_LI("Legacy %s table of SQL backend has been "
"dropped."), table_name)
else:
LOG.info(_LI('%s table does not exist.'), table_name)
elif connection_scheme == 'hbase':
with conn.conn_pool.connection() as h_conn:
tables = h_conn.tables()
table_name_mapping = {'alarm': 'alarm',
'alarm_h': 'alarm history'}
for table_name in ['alarm', 'alarm_h']:
try:
if table_name in tables:
h_conn.disable_table(table_name)
h_conn.delete_table(table_name)
LOG.info(_LI("Legacy %s table of Hbase backend "
"has been dropped."),
table_name_mapping[table_name])
else:
LOG.info(_LI('%s table does not exist.'),
table_name_mapping[table_name])
except Exception as e:
LOG.error(_LE('Error occurred while dropping alarm '
'tables of Hbase, %s'), e)
elif connection_scheme == 'mongodb':
for table_name in ['alarm', 'alarm_history']:
if table_name in conn.db.conn.collection_names():
conn.db.conn.drop_collection(table_name)
LOG.info(_LI("Legacy %s table of Mongodb backend has been "
"dropped."), table_name)
else:
LOG.info(_LI('%s table does not exist.'), table_name)
LOG.info('Legacy alarm tables cleanup done.')
|
StarcoderdataPython
|
1784678
|
import base64
import hmac
import hashlib
import time
import requests
import urllib
class azureiot:
API_VERSION = '2016-02-03'
TOKEN_VALID_SECS = 10
TOKEN_FORMAT = 'SharedAccessSignature sig=%s&se=%s&sr=%s'
def __init__(self, connectionString=None):
if connectionString != None:
iotHost, keyValue = [sub[sub.index('=') + 1:] for sub in connectionString.split(";")]
self.iotHost = iotHost
self.keyValue = keyValue
def _buildExpiryOn(self):
return '%d' % (time.time() + self.TOKEN_VALID_SECS)
def _buildIoTHubSasToken(self, deviceId):
resourceUri = '%s/devices/%s' % (self.iotHost, deviceId)
targetUri = resourceUri.lower()
expiryTime = self._buildExpiryOn()
toSign = '%s\n%s' % (targetUri, expiryTime)
key = base64.b64decode(self.keyValue.encode('utf-8'))
signature = urllib.quote(
base64.b64encode(
hmac.HMAC(key, toSign.encode('utf-8'), hashlib.sha256).digest()
)
).replace('/', '%2F')
return self.TOKEN_FORMAT % (signature, expiryTime, targetUri)
def sendMessage(self, deviceId, message):
sasToken = self._buildIoTHubSasToken(deviceId)
url = 'https://%s/devices/%s/messages/events?api-version=%s' % (self.iotHost, deviceId, self.API_VERSION)
r = requests.post(url, headers={'Authorization': sasToken}, data=message)
return r.text, r.status_code
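
# Usage sketch (illustrative only; the connection string below is a made-up
# placeholder in the two-segment "key=value;key=value" form this constructor
# expects, and the module targets Python 2 since it relies on urllib.quote):
#
#     client = azureiot("HostName=myhub.azure-devices.net;SharedAccessKey=...")
#     body, status = client.sendMessage("my-device", '{"temperature": 21.5}')
#     print(status, body)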
|
StarcoderdataPython
|
115737
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import gym
gym.register(
id='GridWorld-v1',
entry_point='simple_grid:MyGridWorld1',
max_episode_steps=200,
reward_threshold=100.0
)
from skinner import FiniteSet
from objects import Robot, NeuralRobot
class MyRobot(Robot):
"""there are 3 nubmers in state
The first 2 present position by default
"""
init_power = 20
@property
def power(self):
# the third number of state presents the power
return self.state[2]
def _reset(self):
self.state = 1, self.env.n_rows, self.init_power
def _next_state(self, state, action):
"""transition function
Arguments:
state -- state before action
action -- the action selected by the agent
Returns:
new state
Raises:
Exception -- invalid action
"""
if action=='e':
next_state = (state[0]+1, state[1], state[2])
elif action=='w':
next_state = (state[0]-1, state[1], state[2])
elif action=='s':
next_state = (state[0], state[1]-1, state[2])
elif action=='n':
next_state = (state[0], state[1]+1, state[2])
else:
raise Exception('invalid action!')
if self.env.collide(next_state[:2]):
next_state = state
next_state = *next_state[:2], next_state[2]-1
else:
if next_state[:2] == self.env.CHARGER:
next_state = *next_state[:2], 35
else:
next_state = *next_state[:2], next_state[2]-1
return next_state
def _get_reward(self, state0, action, state1):
"""reward function
called in step method
Arguments:
state0 -- state before action
action -- the action
state1 -- state after action
Returns:
number -- reward
"""
r = 0
# if state0[-1] <=3:
# if state1[:2] == self.env.CHARGER:
# r += .1
# else:
# r -= .1
# else:
# if state1[:2] == self.env.CHARGER:
# r -= .1
if state1[:2] in self.env.TRAPS:
r -= 100
elif state1[:2] in self.env.DEATHTRAPS:
r -= 200
elif state1[:2] == self.env.GOLD:
r += 100
elif state0[:2] == state1[:2]:
r -= 2
else:
r -= 1
return r
def Q(self, key):
# predict Q value of key based on current QTable
if key in self.QTable:
return self.QTable[key]
dm = 100
q = 0
for k, v in self.QTable.items():
if k[1] == key[1] and k[0][:2] == key[0][:2]:
d = abs(k[0][2] - key[0][2])
if d < dm:
dm = d
q = self.QTable[key]
return q
agent = MyRobot(alpha=0.7, gamma=0.99, epsilon=0.1)
if __name__ == '__main__':
env = gym.make('GridWorld-v1')
env.config('config1.yaml')
env.add_agent(agent)
env.seed()
env.demo(n_epochs=2000)
|
StarcoderdataPython
|
5095984
|
import ply.yacc as yacc
import ply.lex as lex
from etcdb import PARSER_LOCK
from etcdb.sqlparser import etcdb_lexer
from etcdb.sqlparser.sql_tree import SQLTree
# noinspection PyUnresolvedReferences
from etcdb_lexer import tokens
precedence = (
('left', 'AND', 'OR'),
('right', 'UNOT'),
)
def p_statement(p):
"""statement : select_statement
| show_tables_statement
| create_table_statement
| create_database_statement
| show_databases_statement
| use_database_statement
| commit_statement
| set_statement
| insert_statement
| delete_statement
| drop_database_statement
| drop_table_statement
| desc_table_statement
| update_table_statement
| wait_statement"""
p[1].success = True
p[0] = p[1]
def p_wait_statement(p):
"""wait_statement : WAIT select_item_list FROM identifier opt_WHERE opt_AFTER"""
tree = SQLTree()
tree.query_type = "WAIT"
tree.table = p[4]
tree.expressions = p[2]
tree.where = p[5]
tree.options = p[6]
p[0] = tree
def p_opt_after_empty(p):
"""opt_AFTER : """
def p_opt_after(p):
"""opt_AFTER : AFTER NUMBER"""
after = {
'after': int(p[2])
}
p[0] = after
def p_update_table_statement(p):
"""update_table_statement : UPDATE identifier SET col_expr_list opt_WHERE opt_USE_LOCK"""
tree = SQLTree()
tree.query_type = "UPDATE"
tree.table = p[2]
tree.expressions = p[4]
tree.where = p[5]
tree.lock = p[6]
p[0] = tree
def p_col_expr_list_one(p):
"""col_expr_list : col_expr"""
p[0] = [p[1]]
def p_col_expr_list(p):
"""col_expr_list : col_expr_list ',' col_expr"""
p[1].append(p[3])
p[0] = p[1]
def p_col_expr(p):
"""col_expr : identifier '=' expr"""
p[0] = (p[1], p[3])
def p_desc_table_statement(p):
"""desc_table_statement : DESC identifier"""
tree = SQLTree()
tree.table = p[2]
tree.query_type = "DESC_TABLE"
p[0] = tree
def p_drop_database_statement(p):
"""drop_database_statement : DROP DATABASE identifier"""
tree = SQLTree()
tree.db = p[3]
tree.query_type = "DROP_DATABASE"
p[0] = tree
def p_drop_table_statement(p):
"""drop_table_statement : DROP TABLE identifier opt_IF_EXISTS"""
tree = SQLTree()
tree.table = p[3]
tree.query_type = "DROP_TABLE"
tree.options['if_exists'] = p[4]
p[0] = tree
def p_opt_if_exists_empty(p):
"""opt_IF_EXISTS : """
p[0] = False
def p_opt_if_exists(p):
"""opt_IF_EXISTS : IF EXISTS"""
p[0] = True
def p_insert_statement(p):
"""insert_statement : INSERT INTO identifier opt_fieldlist VALUES '(' values_list ')' opt_USE_LOCK"""
tree = SQLTree()
tree.query_type = "INSERT"
tree.table = p[3]
n_fields = len(p[4])
n_values = len(p[7])
if n_fields != n_values:
msg = 'There are {n_fields} fields, but {n_values} values'.format(
n_fields=n_fields,
n_values=n_values
)
raise SQLParserError(msg)
for i in xrange(n_fields):
tree.fields[p[4][i]] = p[7][i]
tree.lock = p[9]
p[0] = tree
def p_opt_fieldlist_empty(p):
"""opt_fieldlist : """
p[0] = {}
def p_opt_fieldlist(p):
"""opt_fieldlist : '(' fieldlist ')'"""
p[0] = p[2]
def p_fieldlist_one(p):
"""fieldlist : identifier"""
p[0] = [p[1]]
def p_fieldlist_many(p):
"""fieldlist : fieldlist ',' identifier """
if p[1] is None:
p[0] = [p[3]]
else:
p[1].append(p[3])
p[0] = p[1]
def p_values_list_one(p):
"""values_list : value"""
p[0] = [p[1]]
def p_values_list_many(p):
"""values_list : values_list ',' value"""
if p[1] is None:
p[0] = p[3]
else:
p[1].append(p[3])
p[0] = p[1]
def p_set_statement(p):
"""set_statement : set_autocommit_statement
| set_names_statement"""
p[0] = p[1]
def p_set_names_statement(p):
"""set_names_statement : SET NAMES STRING"""
tree = SQLTree()
tree.query_type = "SET_NAMES"
p[0] = tree
def p_set_statement_autocommit(p):
"""set_autocommit_statement : SET AUTOCOMMIT '=' NUMBER"""
tree = SQLTree()
tree.query_type = "SET_AUTOCOMMIT"
tree.options['autocommit'] = int(p[4])
p[0] = tree
def p_commit_statement(p):
"""commit_statement : COMMIT"""
tree = SQLTree()
tree.query_type = "COMMIT"
p[0] = tree
def p_create_table_statement(p):
"""create_table_statement : CREATE TABLE identifier '(' create_definition_list ')'"""
tree = SQLTree()
tree.query_type = "CREATE_TABLE"
tree.table = p[3]
tree.fields = p[5]
p[0] = tree
def p_create_database_statement(p):
"""create_database_statement : CREATE DATABASE identifier"""
tree = SQLTree()
tree.query_type = "CREATE_DATABASE"
tree.db = p[3]
p[0] = tree
def p_show_databases_statement(p):
"""show_databases_statement : SHOW DATABASES"""
tree = SQLTree()
tree.query_type = "SHOW_DATABASES"
p[0] = tree
def p_use_database_statement(p):
"""use_database_statement : USE identifier"""
tree = SQLTree()
tree.query_type = "USE_DATABASE"
tree.db = p[2]
p[0] = tree
def p_identifier(p):
"""identifier : STRING"""
p[0] = p[1]
def p_identifier_escaped(p):
"""identifier : '`' STRING '`'"""
p[0] = p[2]
def p_create_definition_list_one(p):
"""create_definition_list : create_definition"""
p[0] = {
p[1][0]: p[1][1]
}
def p_create_definition_list_many(p):
"""create_definition_list : create_definition_list ',' create_definition"""
create_definition_list = p[1]
create_definition_list[p[3][0]] = p[3][1]
p[0] = create_definition_list
def p_create_definition(p):
"""create_definition : identifier column_definition"""
p[0] = p[1], p[2]
def p_column_definition(p):
"""column_definition : data_type opt_column_def_options_list"""
p[0] = {
'type': p[1]
}
p[0]['options'] = p[2]
def p_data_type(p):
"""data_type : INTEGER opt_UNSIGNED
| VARCHAR '(' NUMBER ')'
| DATETIME
| DATETIME '(' NUMBER ')'
| INT opt_UNSIGNED
| LONGTEXT
| SMALLINT opt_UNSIGNED
| TINYINT
| BOOL"""
p[0] = p[1]
def p_opt_UNSIGNED(p):
"""opt_UNSIGNED :
| UNSIGNED"""
def p_opt_column_def_options_list_empty(p):
"""opt_column_def_options_list : """
p[0] = {
'nullable': True
}
def p_opt_column_def_options_list(p):
"""opt_column_def_options_list : opt_column_def_options opt_column_def_options_list"""
if p[2] is None:
p[0] = p[1]
else:
p[2].update(p[1])
p[0] = p[2]
def p_DEFAULT_CLAUSE(p):
"""opt_column_def_options : DEFAULT value"""
p[0] = {
'default': p[2]
}
def p_NULL(p):
"""opt_column_def_options : NULL"""
p[0] = {
'nullable': True
}
def p_NOT_NULL(p):
"""opt_column_def_options : NOT NULL"""
p[0] = {
'nullable': False
}
def p_AUTO_INCREMENT(p):
"""opt_column_def_options : AUTO_INCREMENT"""
p[0] = {
'auto_increment': True
}
def p_PRIMARY_KEY(p):
"""opt_column_def_options : PRIMARY KEY"""
p[0] = {
'primary': True
}
def p_UNIQUE(p):
"""opt_column_def_options : UNIQUE"""
p[0] = {
'unique': True
}
def p_value(p):
"""value : q_STRING
| NUMBER
| STRING_VALUE """
p[0] = p[1]
def p_q_STRING(p):
"""q_STRING : "'" STRING "'" """
p[0] = p[2]
def p_q_STRING_EMPTY(p):
"""q_STRING : """
p[0] = ""
def p_select_statement(p):
"""select_statement : SELECT select_item_list opt_FROM opt_WHERE opt_ORDER_BY opt_LIMIT"""
tree = SQLTree()
tree.query_type = "SELECT"
tree.db = p[3][0]
tree.table = p[3][1]
tree.expressions = p[2]
tree.where = p[4]
try:
tree.limit = int(p[6])
except TypeError:
tree.limit = None
tree.order = p[5]
p[0] = tree
def p_opt_ORDER_BY_empty(p):
"""opt_ORDER_BY : """
p[0] = None
def p_opt_ORDER_BY_simple(p):
"""opt_ORDER_BY : ORDER BY identifier opt_ORDER_DIRECTION"""
order = {
'by': p[3],
'direction': p[4]
}
p[0] = order
def p_opt_ORDER_BY_extended(p):
"""opt_ORDER_BY : ORDER BY identifier '.' identifier opt_ORDER_DIRECTION"""
order = {
'by': p[5],
'direction': p[6]
}
p[0] = order
def p_opt_ORDER_DIRECTION_empty(p):
"""opt_ORDER_DIRECTION : """
p[0] = 'ASC'
def p_opt_ORDER_DIRECTION(p):
"""opt_ORDER_DIRECTION : ASC
| DESC """
p[0] = p[1]
def p_opt_LIMIT_empty(p):
"""opt_LIMIT : """
p[0] = None
def p_opt_LIMIT(p):
"""opt_LIMIT : LIMIT NUMBER"""
p[0] = p[2]
def p_show_tables_statement(p):
"""show_tables_statement : SHOW opt_FULL TABLES"""
tree = SQLTree()
tree.query_type = "SHOW_TABLES"
tree.options['full'] = p[2]
p[0] = tree
def p_opt_from_empty(p):
"""opt_FROM : """
p[0] = None, None
def p_opt_from(p):
"""opt_FROM : FROM table_reference"""
p[0] = p[2]
def p_table_reference(p):
"""table_reference : identifier"""
p[0] = None, p[1]
def p_table_reference_w_database(p):
"""table_reference : identifier '.' identifier"""
p[0] = p[1], p[3]
def p_opt_FULL_empty(p):
"""opt_FULL : """
p[0] = False
def p_opt_FULL(p):
"""opt_FULL : FULL"""
p[0] = True
def p_select_item_list_select_item(p):
"""select_item_list : select_item """
p[0] = [p[1]]
def p_select_item_list(p):
"""select_item_list : select_item_list ',' select_item """
select_item_list = p[1]
select_item_list .append(p[3])
p[0] = select_item_list
def p_select_item_list_star(p):
"""select_item_list : '*'"""
p[0] = [
(
('*', None),
None
)
]
def p_select_item(p):
"""select_item : select_item2 select_alias"""
p[0] = (p[1], p[2])
def p_select_item2(p):
"""select_item2 : table_wild
| expr """
p[0] = p[1]
def p_select_alias_empty(p):
"""select_alias : """
p[0] = None
def p_select_alias(p):
"""select_alias : AS identifier"""
p[0] = p[2]
def p_table_wild(p):
"""table_wild : identifier '.' '*' """
p[0] = ("*", p[1])
def p_opt_USE_LOCK_empty(p):
"""opt_USE_LOCK : """
p[0] = None
def p_opt_USE_LOCK(p):
"""opt_USE_LOCK : USE LOCK STRING_VALUE """
p[0] = p[3]
def p_opt_WHERE_empty(p):
"""opt_WHERE : """
p[0] = None
def p_opt_WHERE(p):
"""opt_WHERE : WHERE expr"""
p[0] = p[2]
def p_expr_OR(p):
"""expr : expr OR expr"""
p[0] = ('OR', p[1], p[3])
def p_expr_AND(p):
"""expr : expr AND expr"""
p[0] = ('AND', p[1], p[3])
def p_expr_NOT(p):
"""expr : NOT expr %prec UNOT"""
p[0] = ('NOT', p[2])
def p_expr_bool_primary(p):
"""expr : boolean_primary"""
p[0] = ('bool_primary', p[1])
def p_boolean_primary_is_null(p):
"""boolean_primary : boolean_primary IS NULL"""
p[0] = ('IS NULL', p[1])
def p_boolean_primary_is_not_null(p):
"""boolean_primary : boolean_primary IS NOT NULL"""
p[0] = ('IS NOT NULL', p[1])
def p_boolean_primary_comparison(p):
"""boolean_primary : boolean_primary comparison_operator predicate"""
p[0] = (p[2], p[1], p[3])
def p_boolean_primary_predicate(p):
"""boolean_primary : predicate"""
p[0] = ('predicate', p[1])
def p_comparison_operator(p):
"""comparison_operator : '='
| GREATER_OR_EQ
| '>'
| LESS_OR_EQ
| '<'
| N_EQ"""
p[0] = p[1]
def p_predicate(p):
"""predicate : bit_expr """
p[0] = ('bit_expr', p[1])
def p_predicate_in(p):
"""predicate : bit_expr IN '(' list_expr ')'"""
p[0] = (
'IN',
p[1],
p[4]
)
def p_list_expr_one(p):
"""list_expr : expr"""
p[0] = [p[1]]
def p_list_expr(p):
"""list_expr : list_expr ',' expr"""
p[1].append(p[3])
p[0] = p[1]
def p_bit_expr(p):
"""bit_expr : simple_expr"""
p[0] = ('simple_expr', p[1])
def p_simple_expr_identifier(p):
"""simple_expr : identifier"""
p[0] = ('IDENTIFIER', p[1])
def p_simple_expr_identifier_full(p):
"""simple_expr : identifier '.' identifier"""
p[0] = ('IDENTIFIER', p[1] + '.' + p[3])
def p_simple_expr_parent(p):
"""simple_expr : '(' expr ')'"""
p[0] = ('expr', p[2])
def p_simple_expr_variable(p):
"""simple_expr : variable"""
p[0] = ('variable', p[1])
def p_variable(p):
"""variable : '@' '@' STRING"""
p[0] = p[3]
def p_simple_expr_literal(p):
"""simple_expr : literal"""
p[0] = ('literal', p[1])
def p_literal(p):
"""literal : q_STRING
| NUMBER
| STRING_VALUE"""
p[0] = p[1]
def p_simple_expr_function_call(p):
"""simple_expr : function_call"""
p[0] = ('function_call', p[1])
def p_function_call_version(p):
"""function_call : VERSION '(' ')'"""
p[0] = 'VERSION'
def p_function_call_count_star(p):
"""function_call : COUNT '(' '*' ')'"""
p[0] = 'COUNT'
def p_delete_statement(p):
"""delete_statement : DELETE FROM identifier opt_WHERE"""
tree = SQLTree()
tree.query_type = 'DELETE'
tree.table = p[3]
tree.where = p[4]
p[0] = tree
def p_error(t):
if t:
msg = "Syntax error at lexeme '{value}' (type: '{type}'). " \
"Line: {lineno}, position: {lexpos}".format(value=t.value,
type=t.type,
lineno=t.lineno,
lexpos=t.lexpos)
raise SQLParserError(msg)
else:
raise SQLParserError("Syntax error")
class SQLParser(object):
def __init__(self):
self._parser = yacc.yacc(debug=False)
def parse(self, *args, **kwargs):
try:
PARSER_LOCK.acquire()
# noinspection PyUnusedLocal
lexer = lex.lex(module=etcdb_lexer)
tree = self._parser.parse(*args, **kwargs)
tree.query = args[0]
return tree
except SQLParserError:
self._parser.restart()
raise
finally:
PARSER_LOCK.release()
class SQLParserError(Exception):
"""All SQL parsing errors"""
|
StarcoderdataPython
|
5090843
|
<reponame>al-arz/the-tale<gh_stars>10-100
from aiohttp import web
from tt_web import log
from tt_web import postgresql
from . import operations
async def on_startup(app):
await postgresql.initialize(app['config']['database'])
await operations.initialize_timestamps_cache()
async def on_cleanup(app):
await postgresql.deinitialize()
def register_routers(app):
from . import handlers
app.router.add_post('/version', handlers.version)
app.router.add_post('/push-message', handlers.push_message)
app.router.add_post('/diary', handlers.diary)
def create_application(config):
app = web.Application()
app['config'] = config
log.initilize(config['log'])
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
register_routers(app)
return app
|
StarcoderdataPython
|
3496489
|
import types
import synapse.common as s_common
from synapse.eventbus import EventBus
class Task(EventBus):
'''
A cancelable Task abstraction which operates much like a Future
but with some additional features.
'''
def __init__(self, iden=None):
EventBus.__init__(self)
if iden is None:
iden = s_common.guid()
self.info = {}
self.iden = iden
self.on('task:fini', self._onTaskFini)
def _onTaskFini(self, mesg):
retn = mesg[1].get('retn')
if retn is not None:
self.fire('task:retn', task=self.iden, retn=retn)
self.fini()
def get(self, prop, defval=None):
'''
Get a value from the info dict for the task.
Args:
prop (str): The name of the info value.
defval (obj): The default value to return if not found
Returns:
(obj): The object from the info dict (or None)
'''
return self.info.get(prop, defval)
def set(self, prop, valu):
'''
Set a value in the info dict for the task.
Args:
prop (str): The name of the info dict value
valu (obj): The value to set in the info dict
'''
self.info[prop] = valu
def err(self, info):
'''
Fire an error return value for the task.
Args:
info (dict): Exception info dict (see synapse.common.excinfo )
'''
retn = (False, info)
self.fire('task:retn', task=self.iden, retn=retn)
def retn(self, valu):
'''
Fire a result return value for the task.
Args:
valu (obj): The return value
'''
retn = (True, valu)
self.fire('task:retn', task=self.iden, retn=retn)
def onretn(self, func):
'''
Provide a function to receive return values.
        The specified callback will be called with a retn tuple
        defined as (ok, valu). If ok is True, valu is a return
        value; if ok is False, valu is an excinfo dictionary.
Args:
func (function): Callback for a retn tuple.
'''
def prox(mesg):
return func(mesg[1].get('retn'))
self.on('task:retn', prox)
def run(self):
'''
Execute the task.
'''
if not self.isfini:
self._task_run()
self.fini()
def _task_run(self): # pragma: no cover
raise s_common.NoSuchImpl(name='_task_run')
def __call__(self):
self.run()
class CallTask(Task):
'''
An extension for a runnable task.
Args:
call ((func,[],{})): A tuple of call details.
'''
def __init__(self, call):
Task.__init__(self)
self._call = call
def _task_run(self):
func, args, kwargs = self._call
try:
valu = func(*args, **kwargs)
if isinstance(valu, types.GeneratorType):
for v in valu:
self.retn(v)
else:
self.retn(valu)
except Exception as e:
self.err(s_common.excinfo(e))
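

# Small usage sketch added for illustration (not part of the original module):
# CallTask wraps a plain callable and delivers its result through onretn()
# as an (ok, valu) tuple.
if __name__ == '__main__':  # pragma: no cover
    def _add(x, y):
        return x + y

    task = CallTask((_add, (2, 3), {}))
    task.onretn(print)  # prints (True, 5)
    task.run()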
|
StarcoderdataPython
|
5085040
|
from parsy import generate, match_item, test_item
class Command:
def __init__(self, parameter):
self.parameter = parameter
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__, self.parameter)
class Forward(Command):
pass
class Backward(Command):
pass
class Right(Command):
pass
class Left(Command):
pass
commands = {
'fd': Forward,
'bk': Backward,
'rt': Right,
'lt': Left,
}
@generate
def statement():
cmd_name = yield test_item(lambda i: i in commands.keys(), "command")
parameter = yield test_item(lambda i: isinstance(i, int), "number")
yield match_item('\n')
return commands[cmd_name](int(parameter))
program = statement.many()
import pytest # noqa isort:skip
test_item = pytest.mark.skip(test_item) # This is not a test
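
# Example run (illustrative): the parsers above consume a sequence of
# already-tokenized items rather than a raw string.
#
#     program.parse(['fd', 10, '\n', 'rt', 90, '\n'])
#     # -> [Forward(10), Right(90)]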
|
StarcoderdataPython
|
219803
|
<filename>src/alembic/versions/15ea3c2cf83d_pr_comment_editing.py
"""Adding column to store edited_by and edited_on a PR comment
Revision ID: 15ea3c2cf83d
Revises: <PASSWORD>
Create Date: 2015-11-09 16:18:47.192088
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
''' Add the columns editor_id and edited_on to the table
pull_request_comments.
'''
op.add_column(
'pull_request_comments',
sa.Column(
'editor_id',
sa.Integer,
sa.ForeignKey('users.id', onupdate='CASCADE'),
nullable=True)
)
op.add_column(
'pull_request_comments',
sa.Column(
'edited_on',
sa.DateTime,
nullable=True)
)
def downgrade():
''' Remove the columns editor_id and edited_on from the table
pull_request_comments.
'''
op.drop_column('pull_request_comments', 'editor_id')
op.drop_column('pull_request_comments', 'edited_on')
|
StarcoderdataPython
|
4918774
|
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
from xsdata.models.datatype import XmlPeriod
__NAMESPACE__ = "NISTSchema-SV-IV-union-gMonthDay-gYearMonth-enumeration-5-NS"
class NistschemaSvIvUnionGMonthDayGYearMonthEnumeration5Type(Enum):
VALUE_05_07 = XmlPeriod("--05-07")
VALUE_07_18 = XmlPeriod("--07-18")
VALUE_2011_09 = XmlPeriod("2011-09")
VALUE_2019_10 = XmlPeriod("2019-10")
VALUE_03_13 = XmlPeriod("--03-13")
VALUE_02_15 = XmlPeriod("--02-15")
VALUE_11_30 = XmlPeriod("--11-30")
VALUE_1997_02 = XmlPeriod("1997-02")
@dataclass
class NistschemaSvIvUnionGMonthDayGYearMonthEnumeration5:
class Meta:
name = "NISTSchema-SV-IV-union-gMonthDay-gYearMonth-enumeration-5"
namespace = "NISTSchema-SV-IV-union-gMonthDay-gYearMonth-enumeration-5-NS"
value: Optional[NistschemaSvIvUnionGMonthDayGYearMonthEnumeration5Type] = field(
default=None,
metadata={
"required": True,
}
)
|
StarcoderdataPython
|
3468804
|
<gh_stars>0
# Generated by Django 3.1.1 on 2020-12-15 10:20
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import stdimage.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cart_id', models.CharField(blank=True, max_length=40, null=True)),
('street_address', models.CharField(max_length=100)),
('apartment_address', models.CharField(max_length=100)),
('country', django_countries.fields.CountryField(max_length=2)),
('city', models.CharField(max_length=60)),
('zip', models.CharField(max_length=10)),
('address_type', models.CharField(choices=[('B', 'Billing'), ('S', 'Shipping')], max_length=1)),
('payment_option', models.CharField(choices=[('D', 'Debit Card'), ('P', 'Paypal'), ('M', 'M-Pesa'), ('C', 'Cash On Delivery'), ('S', 'Stripe')], max_length=2)),
('default', models.BooleanField(default=False)),
('name', models.CharField(blank=True, max_length=60, null=True)),
('email', models.EmailField(blank=True, max_length=50, null=True)),
('phone', models.CharField(blank=True, max_length=20, null=True)),
('date_updated', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Addresses',
'ordering': ['-date_updated'],
},
),
migrations.CreateModel(
name='Brand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('title', models.CharField(help_text='Unique title to identify Your store and your product line', max_length=100, unique=True)),
('active', models.BooleanField(default=True)),
('is_featured', models.BooleanField(blank=True, default=False, null=True)),
('image', stdimage.models.StdImageField(blank=True, default='images/brands/brand_background/default.jpg', help_text="wallpaper for your store.Leave blank if you don't have one", null=True, upload_to='images/brands/brand_background')),
('logo', stdimage.models.StdImageField(blank=True, help_text="logo for your store, Leave blank if you don't have one", null=True, upload_to='images/brands/brand_logo')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Coupon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=10)),
('amount', models.DecimalField(decimal_places=2, default=20, max_digits=9)),
('valid', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('cart_id', models.CharField(blank=True, max_length=40, null=True)),
('status', models.IntegerField(choices=[(10, 'New'), (20, 'Paid'), (30, 'Done')], default=10)),
('ref_code', models.CharField(max_length=20)),
('start_date', models.DateTimeField(auto_now_add=True)),
('order_date', models.DateTimeField(auto_now=True)),
('ordered', models.BooleanField(default=False)),
('payment', models.CharField(blank=True, max_length=2, null=True)),
('being_delivered', models.BooleanField(default=False)),
('received', models.BooleanField(default=False)),
('refund_requested', models.BooleanField(default=False)),
('refund_granted', models.BooleanField(default=False)),
('email', models.EmailField(blank=True, max_length=50, null=True)),
('phone', models.CharField(blank=True, max_length=20, null=True)),
('name', models.CharField(blank=True, max_length=60, null=True)),
('billing_address1', models.CharField(max_length=60)),
('billing_address2', models.CharField(blank=True, max_length=60)),
('billing_zip_code', models.CharField(max_length=12)),
('billing_country', models.CharField(max_length=3)),
('billing_city', models.CharField(blank=True, max_length=12, null=True)),
('shipping_address1', models.CharField(max_length=60)),
('shipping_address2', models.CharField(blank=True, max_length=60)),
('shipping_zip_code', models.CharField(max_length=12)),
('shipping_country', models.CharField(max_length=3)),
('shipping_city', models.CharField(blank=True, max_length=12, null=True)),
('comments', models.TextField(blank=True)),
],
options={
'ordering': ['-modified'],
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('cart_id', models.CharField(blank=True, max_length=40, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('date_ordered', models.DateTimeField(auto_now=True)),
('status', models.IntegerField(choices=[(10, 'New'), (20, 'Processing'), (30, 'Sent'), (40, 'Cancelled'), (45, 'in_transit'), (50, 'Delivered')], default=10)),
('order_received', models.BooleanField(default=False)),
('ordered', models.BooleanField(default=False)),
('quantity', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('comments', models.TextField(blank=True)),
],
options={
'ordering': ['-date_added'],
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(help_text='Unique value for product page URL, created from the product title.', unique=True)),
('stock', models.IntegerField(default=1)),
('sku', models.CharField(max_length=120)),
('in_stock', models.BooleanField(blank=True, default=True, null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=9)),
('discount_price', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True, validators=[django.core.validators.MinValueValidator(1.0)])),
('made_in_africa', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('is_bestseller', models.BooleanField(default=False)),
('is_featured', models.BooleanField(default=False)),
('is_bestrated', models.BooleanField(default=False)),
('description', models.TextField()),
('additional_information', models.TextField(blank=True, null=True)),
                ('meta_keywords', models.CharField(help_text='Comma-delimited set of SEO keywords that summarize the type of product (max 4 words)', max_length=100, verbose_name='Meta Keywords')),
                ('meta_description', models.CharField(help_text='Help sellers get your product easily. Give a simple short description about the page content you have added. This information makes it easy for customers to get your product and offers an overview of it', max_length=255, verbose_name='Meta Description')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('category_choice', models.CharField(choices=[('At', 'Arts, Crafts'), ('Bk', 'Books'), ('Bb', 'Baby Care'), ('Be', 'Beautiful 2'), ('Ca', 'Camera & Photo'), ('S', 'Shirt'), ('Sw', 'Sport wear'), ('Ow', 'Outwear'), ('Am', 'Automotive & Motorcycle'), ('Ca', 'Cell Phones & Accessories'), ('El', 'Electronics'), ('Fa', 'Fashion'), ('Fu', 'Furniture'), ('So', 'Sokoni'), ('Wo', 'Women Fashion')], help_text='Choose the main category for the product', max_length=2)),
],
options={
'verbose_name_plural': 'Products',
'db_table': 'Products',
'ordering': ['-created_at'],
},
),
migrations.CreateModel(
name='ProductAttribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('name', models.CharField(max_length=300)),
('description', models.TextField(blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('value', models.CharField(max_length=500)),
('description', models.TextField(blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('image', stdimage.models.StdImageField(upload_to='images/products')),
('in_display', models.BooleanField(default=True)),
],
options={
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='ProductReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('date', models.DateTimeField(auto_now_add=True)),
('rating', models.PositiveSmallIntegerField(choices=[(5, 5), (4, 4), (3, 3), (2, 2), (1, 1)], default=5)),
('is_approved', models.BooleanField(default=True)),
('content', models.TextField()),
('country', django_countries.fields.CountryField(blank=True, max_length=2, null=True)),
],
),
migrations.CreateModel(
name='RequestRefund',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reason', models.TextField()),
('accepted', models.BooleanField(default=False)),
('email', models.EmailField(max_length=254)),
('ref_code', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='StatusCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('short_name', models.IntegerField(choices=[(10, 'New'), (20, 'Paid'), (30, 'Processing'), (40, 'Sent'), (50, 'Cancelled'), (60, 'in_transit'), (70, 'Delivered')], default=10)),
('name', models.CharField(max_length=300)),
('description', models.TextField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='StripePayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stripe_charge_id', models.CharField(max_length=50)),
('amount', models.DecimalField(decimal_places=2, max_digits=9)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='WishList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False, verbose_name='creation date and time')),
('modified', models.DateTimeField(editable=False, null=True, verbose_name='modification date and time')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='me2ushop.product')),
],
options={
'abstract': False,
},
),
]
|
StarcoderdataPython
|
3584627
|
import psycopg2, multiprocessing
from dbconfig import chitanka_dbname, chitanka_dbuser, chitanka_dbpassword
import os
import re, nltk
from pathlib import Path
import requests, os,re
import glob
def importData(start, end):
my_dirs = glob.glob("../books/*")
my_dirs.sort()
try:
connection = psycopg2.connect("dbname='" + chitanka_dbname +
"' user='" + chitanka_dbuser +
"' password='" + <PASSWORD> + "'")
connection.autocommit = True
cur = connection.cursor()
except Exception as e:
print(e)
for my_dir in my_dirs[start:end]:
author_name = re.sub("\-", " ", my_dir[9:]) # 9 -> to skip the ../books/ and use only the author name
try:
sql = 'insert into authors (name) values(%s) RETURNING id'
cur.execute(sql, (author_name, ))
author_id = cur.fetchone()[0]
connection.commit()
except Exception as e:
print(e)
folder_location = "../books/" + my_dir[9:]
my_files = os.listdir("../books/" + my_dir[9:])
author_words_count = dict()
for my_file in my_files:
if my_file.endswith(".txt"):
names = my_file.split("-")
book_name = ""
for name in names[1:]:
book_name += name + " "
book_name.strip()
book_name = book_name[:len(book_name) - 5] #to skip .txt
file_location = os.path.join(folder_location, my_file)
f = open(file_location, encoding='utf-8', mode='r')
file_content = f.read()
f.close()
current_file_words = list(set(re.findall("[а-яА-Я]{3,}", file_content)))
sentences = nltk.tokenize.sent_tokenize(file_content)
try:
words_in_book = len(current_file_words)
sql = 'insert into books (name, words_count, author_id) values(%s, %s, %s) RETURNING id'
cur.execute(sql, (book_name, words_in_book, author_id))
book_id = cur.fetchone()[0]
connection.commit()
except Exception as e:
print(e)
try:
for sentence in sentences[3:len(sentences) - 4]:
if not sentence == "":
words_in_sentence = len(re.findall("[а-яА-Я]{3,}", sentence))
sql = 'insert into sentences (sentence, words_count, book_id) values(%s, %s, %s)'
cur.execute(sql, (sentence, words_in_sentence, book_id))
except Exception as e:
print(e)
for word in current_file_words:
# populate author_words dictionary
word = word.lower()
if word in author_words_count:
author_words_count[word] += 1
else:
author_words_count[word] = 1
try:
sql = 'insert into words (word) values(%s) RETURNING id'
cur.execute(sql, (word.lower(),))
word_id = cur.fetchone()[0]
sql2 = 'insert into books_words (book_id, word_id) values(%s, %s)'
cur.execute(sql2, (book_id, word_id))
print("Word added")
connection.commit()
except Exception as e:
print("Word already exists")
try:
sql2 = 'select id from words where word=%s'
cur.execute(sql2, (word.lower(),))
duplicate_word_id = cur.fetchone()[0]
sql2 = 'insert into books_words (book_id, word_id) values(%s, %s)'
cur.execute(sql2, (book_id, duplicate_word_id))
except:
print("This word has already been linked with this author")
print(e)
connection.commit()
# insert unique words the author has used
author_unique_words = sum(value == 1 for value in author_words_count.values())
sql = "update authors set words_count=(%s) where name=%s"
cur.execute(sql, (author_unique_words, author_name))
cur.close()
connection.close()
if __name__ == '__main__':
start = 21
end = 22
for i in range(1):
p1 = multiprocessing.Process(target=importData, args=(start, end))
p2 = multiprocessing.Process(target=importData, args=(start + 1, end+1))
p3 = multiprocessing.Process(target=importData, args=(start + 2, end+2))
p4 = multiprocessing.Process(target=importData, args=(start + 3, end+3))
p5 = multiprocessing.Process(target=importData, args=(start + 4, end+4))
p6 = multiprocessing.Process(target=importData, args=(start + 5, end + 5))
p7 = multiprocessing.Process(target=importData, args=(start + 6, end+6))
p8 = multiprocessing.Process(target=importData, args=(start + 7, end+7))
p9 = multiprocessing.Process(target=importData, args=(start + 8, end + 8))
p10 = multiprocessing.Process(target=importData, args=(start + 9, end + 9))
p1.start()
p2.start()
p3.start()
p4.start()
p5.start()
p6.start()
p7.start()
p8.start()
p9.start()
p10.start()
start += 10
end += 10
|
StarcoderdataPython
|
3585218
|
#!/usr/bin/env python
#
# Copyright 2019 <NAME>, S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
import ssl
import sys
import json
import shlex
import logging
import asyncio
from uuid import uuid4
from urllib.parse import urlparse
from collections import namedtuple
import click
import requests
from aiohttp import web, StreamReader
from aiohttp.web_urldispatcher import UrlDispatcher
log = logging.getLogger('kapow')
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
########################################################################
# Resource Management #
########################################################################
CONNECTIONS = {}
class Connection:
"""
Manages the lifecycle of a PyPow! connection.
Behaves like a memory for the "fields" available in HTTP
connections.
"""
def __init__(self, request):
self._stream = None
self._body = io.BytesIO()
self._status = 200
self._headers = dict()
self._cookies = dict()
self.request = request
async def get(self, key):
"""Get the content of the field `key`."""
res = urlparse(key)
def nth(n):
"""Return the nth element in a path."""
return res.path.split('/')[n]
if res.path == 'request/method':
return self.request.method.encode('utf-8')
elif res.path == 'request/body':
return self.request.content
elif res.path == 'request/path':
return self.request.path.encode('utf-8')
elif res.path == 'request/host':
return self.request.host.encode('utf-8')
elif res.path.startswith('request/matches/'):
return self.request.match_info[nth(2)].encode('utf-8')
elif res.path.startswith('request/params/'):
return self.request.rel_url.query[nth(2)].encode('utf-8')
elif res.path.startswith('request/headers/'):
return self.request.headers[nth(2)].encode('utf-8')
elif res.path.startswith('request/cookies/'):
return self.request.cookies[nth(2)].encode('utf-8')
elif res.path == 'request/form':
data = await self.request.post()
files = [fieldname.encode('utf-8')
for fieldname, field in data.items()]
return b'\n'.join(files)
elif res.path.startswith('request/form/'):
return (await self.request.post())[nth(2)].encode('utf-8')
elif res.path == 'request/files':
data = await self.request.post()
files = [fieldname.encode('utf-8')
for fieldname, field in data.items()
if hasattr(field, 'filename')]
return b'\n'.join(files)
elif res.path.startswith('request/files/'):
name = nth(2)
content = nth(3) # filename / content
field = (await self.request.post())[name]
if content == 'filename':
try:
return field.filename.encode('utf-8')
except Exception:
return b''
elif content == 'content':
try:
return field.file.read()
except Exception:
return b''
else:
raise ValueError(f'Unknown content type {content!r}')
else:
raise ValueError('Unknown path')
async def set(self, key, content):
"""Set the field `key` with the value in `content`."""
res = urlparse(key)
def nth(n):
return res.path.split('/')[n]
if res.path == 'response/status':
self._status = int((await content.read()).decode('utf-8'))
elif res.path == 'response/body':
self._body.write(await content.read())
elif res.path.startswith('response/headers/'):
clean = (await content.read()).rstrip(b'\n').decode('utf-8')
self._headers[nth(2)] = clean
elif res.path.startswith('response/cookies/'):
clean = (await content.read()).rstrip(b'\n').decode('utf-8')
self._cookies[nth(2)] = clean
elif res.path == 'response/stream':
if self._stream is None:
self._stream = web.StreamResponse(status=self._status,
reason="OK",
headers=self._headers)
for name, value in self._cookies.items():
self._stream.set_cookie(name, value)
await self._stream.prepare(self.request)
chunk = await content.readany()
while chunk:
await self._stream.write(chunk)
chunk = await content.readany()
else:
raise ValueError(f'Unknown path {res.path!r}')
async def append(self, key, content):
"""Append to field `key` the value in `content`."""
raise NotImplementedError()
async def build_response(self):
"""Return the appropriate aiohttp.web.*Response."""
if self._stream is None:
response = web.Response(body=self._body.getvalue(),
status=self._status,
headers=self._headers)
for name, value in self._cookies.items():
response.set_cookie(name, value)
return response
else:
await self._stream.write_eof()
return self._stream
async def get_field(request):
"""Get the value of some HTTP field in the given connection."""
id = request.match_info["id"]
field = request.match_info["field"]
try:
connection = CONNECTIONS[id]
except KeyError:
response = web.Response(status=404, reason="Handler ID Not Found")
else:
try:
content = await connection.get(field)
except ValueError:
return web.Response(status=400, reason="Invalid Resource Path")
except KeyError:
return web.Response(status=404, reason="Resource Item Not Found")
except Exception as e:
pass
if isinstance(content, StreamReader):
response = web.StreamResponse(status=200, reason="OK")
await response.prepare(request)
chunk = await content.readany()
while chunk:
await response.write(chunk)
chunk = await content.readany()
await response.write_eof()
else:
response = web.Response(body=content)
return response
async def set_field(request):
"""Set the value of some HTTP field in the given connection."""
id = request.match_info["id"]
field = request.match_info["field"]
try:
connection = CONNECTIONS[id]
except KeyError:
response = web.Response(status=404, reason="Handler ID Not Found")
else:
try:
await connection.set(field, request.content)
except ConnectionResetError:
# Raised when trying to write to an already-closed stream.
request.transport.close()
else:
response = web.Response(body=b'')
return response
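
# Hedged sketch (not part of the original module) of how a handler command
# could drive this data API from Python, relying only on the /handlers/{id}/{field}
# routes registered below and on the KAPOW_URL / KAPOW_HANDLER_ID environment
# variables exported by handle_route():
#
#     kapow_url = os.environ["KAPOW_URL"]          # http://localhost:8081
#     handler_id = os.environ["KAPOW_HANDLER_ID"]
#     path = requests.get(f"{kapow_url}/handlers/{handler_id}/request/path").content
#     requests.put(f"{kapow_url}/handlers/{handler_id}/response/status", data=b"200")
#     requests.put(f"{kapow_url}/handlers/{handler_id}/response/body",
#                  data=b"you requested " + path)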
########################################################################
# Middlewares #
########################################################################
# @web.middleware
# async def overwrite_server_name(request, handler):
# response = await handler(request)
# response.headers["Kapow-Version"] = "0.0.3"
#
# return response
########################################################################
# Endpoint Execution #
########################################################################
def handle_route(entrypoint, command):
"""
Return an aiohttp route handler that will execute entrypoint and
command in order to manage a Kapow! route.
"""
async def _handle(request):
# Register a new connection to Kapow!
id = f"CONN_{str(uuid4()).replace('-', '_')}"
connection = CONNECTIONS[id] = Connection(request)
# Run entrypoint + command passing the connection id
executable, *params = shlex.split(entrypoint)
args = ' '.join([executable]
+ [shlex.quote(token) for token in params]
+ [shlex.quote(command)])
try:
shell_task = await asyncio.create_subprocess_shell(
args,
env={**os.environ,
"KAPOW_URL": "http://localhost:8081",
"KAPOW_HANDLER_ID": id
},
stdin=asyncio.subprocess.DEVNULL)
await shell_task.wait()
except:
raise
else:
# Respond when the command finish
return await connection.build_response()
finally:
del CONNECTIONS[id]
return _handle
########################################################################
# Route Management #
########################################################################
def get_routes(app):
async def _get_routes(request):
"""Return the list of registered routes."""
data = [{"index": idx,
"method": r.method,
"id": r.id,
"url_pattern": r.path,
"entrypoint": r.entrypoint,
"command": r.command}
for idx, r in enumerate(app["user_routes"])]
return web.json_response(data)
return _get_routes
def get_route(app):
async def _get_route(request):
"""Return requested registered route."""
id = request.match_info["id"]
for idx, r in enumerate(app["user_routes"]):
if r.id == id:
return web.json_response({"index": idx,
"method": r.method,
"id": r.id,
"url_pattern": r.path,
"entrypoint": r.entrypoint,
"command": r.command})
else:
return web.Response(status=404, reason="Not Found")
return _get_route
def insert_route(app):
async def _insert_route(request):
"""Insert a new Kapow! route."""
try:
content = await request.json()
except ValueError:
return web.Response(status=400, reason="Malformed JSON")
try:
index = int(content["index"])
assert index >= 0
method = content.get("method", "GET")
entrypoint = content.get("entrypoint", "/bin/sh -c")
command = content.get("command", "")
route = KapowRoute(method=method,
path=content["url_pattern"],
id="ROUTE_" + str(uuid4()).replace('-', '_'),
entrypoint=entrypoint,
command=command,
handler=handle_route(entrypoint, command))
app.change_routes((app["user_routes"][:index]
+ [route]
+ app["user_routes"][index:]))
except (
InvalidRouteError, KeyError, AssertionError, ValueError) as exc:
return web.Response(status=422, reason="Invalid Route")
else:
app["user_routes"].insert(index, route)
return web.json_response({"id": route.id,
"method": route.method,
"url_pattern": route.path,
"entrypoint": route.entrypoint,
"command": route.command,
"index": index}, status=201)
return _insert_route
def append_route(app):
async def _append_route(request):
"""Append a new Kapow! route."""
try:
content = await request.json()
except ValueError as exc:
return web.Response(status=400, reason="Malformed JSON")
try:
method = content.get("method", "GET")
entrypoint = content.get("entrypoint", "/bin/sh -c")
command = content.get("command", "")
route = KapowRoute(method=method,
path=content["url_pattern"],
id=f"ROUTE_{str(uuid4()).replace('-', '_')}",
entrypoint=entrypoint,
command=command,
handler=handle_route(entrypoint, command))
app.change_routes(app["user_routes"] + [route])
except (InvalidRouteError, KeyError) as exc:
return web.Response(status=422, reason="Invalid Route")
else:
app["user_routes"].append(route)
return web.json_response({"id": route.id,
"method": route.method,
"url_pattern": route.path,
"entrypoint": route.entrypoint,
"command": route.command,
"index": len(app["user_routes"]) - 1},
status=201)
return _append_route
def delete_route(app):
async def _delete_route(request):
"""Delete the given Kapow! route."""
id = request.match_info["id"]
routes = [r for r in app["user_routes"] if r.id != id]
if len(routes) == len(app["user_routes"]):
return web.Response(status=404, reason="Not Found")
else:
app.change_routes(routes)
app["user_routes"] = routes
return web.Response(status=204, reason="No Content")
return _delete_route
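
# Hedged sketch (not part of the original module) of registering a route
# through the control API defined above, assuming the control server from
# start_kapow_server() is listening on 127.0.0.1:8081; "hello_handler.sh" is a
# hypothetical script that would talk to the data API:
#
#     requests.post("http://localhost:8081/routes", json={
#         "method": "GET",
#         "url_pattern": "/hello",
#         "entrypoint": "/bin/sh -c",
#         "command": "./hello_handler.sh",
#     })
#
# append_route() responds with the generated route id, which can later be used
# with GET /routes/{id} or DELETE /routes/{id}.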
########################################################################
# aiohttp webapp #
########################################################################
async def run_init_script(app, scripts, interactive):
"""
Run the init script if given, then wait for the shell to finish.
"""
if not scripts:
# No script given
if not interactive:
return
else:
cmd = "/bin/bash"
else:
def build_filenames():
for filename in scripts:
yield shlex.quote(filename)
yield "<(echo)"
filenames = " ".join(build_filenames())
if interactive:
cmd = f"/bin/bash --init-file <(cat {filenames})"
else:
cmd = f"/bin/bash <(cat {filenames})"
shell_task = await asyncio.create_subprocess_shell(
cmd,
executable="/bin/bash",
env={**os.environ,
"KAPOW_URL": "http://localhost:8081"
})
await shell_task.wait()
if interactive:
await app.cleanup()
os._exit(shell_task.returncode)
class InvalidRouteError(Exception):
pass
class DynamicApplication(web.Application):
"""
A wrapper around `aiohttp.web.Application` allowing changing routes
dynamically.
This is not safe as mentioned here:
https://github.com/aio-libs/aiohttp/issues/3238.
On the other hand this is a PoC anyway...
"""
def change_routes(self, routes):
router = UrlDispatcher()
try:
for route in routes:
router.add_route(route.method,
route.path,
route.handler,
name=route.id)
except Exception as exc:
raise InvalidRouteError("Invalid route") from exc
else:
self._router = router
if self._frozen:
self._router.freeze()
KapowRoute = namedtuple('KapowRoute',
('method',
'path',
'id',
'entrypoint',
'command',
'handler'))
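# A minimal, hypothetical sketch (defined but never called by this server) of
# how a route could be swapped in at runtime via
# DynamicApplication.change_routes(). The path, id and shell command below are
# made-up placeholders, not part of the real route set.
def _example_add_route_at_runtime(app):
    async def _hello(request):
        return web.Response(text="hello")
    route = KapowRoute(method="GET",
                       path="/hello",
                       id="ROUTE_example",
                       entrypoint="/bin/sh -c",
                       command="echo hello",
                       handler=_hello)
    app.change_routes(app["user_routes"] + [route])
    app["user_routes"].append(route)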
async def start_background_tasks(app):
loop = asyncio.get_event_loop()
app["debug_tasks"] = loop.create_task(
run_init_script(app, app["scripts"], app["interactive"]))
async def start_kapow_server(bind, scripts, capem=None, certfile=None,
interactive=False, keyfile=None):
user_app = DynamicApplication(client_max_size=1024 ** 3)
user_app["user_routes"] = list() # [KapowRoute]
user_runner = web.AppRunner(user_app)
await user_runner.setup()
ssl_context = None
if certfile and keyfile and capem:
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.load_verify_locations(cafile=capem)
ssl_context.load_cert_chain(certfile, keyfile=keyfile)
ssl_context.check_hostname = False
ip, port = bind.split(':')
user_site = web.TCPSite(user_runner, ip, int(port),
ssl_context=ssl_context)
await user_site.start()
control_app = web.Application(
client_max_size=1024 ** 3
)
control_app.add_routes([
# Control API
web.get('/routes', get_routes(user_app)),
web.get('/routes/{id}', get_route(user_app)),
web.post('/routes', append_route(user_app)),
web.put('/routes', insert_route(user_app)),
web.delete('/routes/{id}', delete_route(user_app)),
# Data API
web.get('/handlers/{id}/{field:.*}', get_field),
web.put('/handlers/{id}/{field:.*}', set_field),
])
control_app["scripts"] = scripts
control_app["interactive"] = interactive
control_app.on_startup.append(start_background_tasks)
control_runner = web.AppRunner(control_app)
await control_runner.setup()
control_site = web.TCPSite(control_runner, '127.0.0.1', 8081)
await control_site.start()
########################################################################
# Command Line #
########################################################################
@click.group()
@click.pass_context
def kapow(ctx):
"""PyPow! If you can script it, you can HTTP it."""
pass
@kapow.command(help="Start a Kapow! server")
@click.option("--capem", default=None)
@click.option("--certfile", default=None)
@click.option("--keyfile", default=None)
@click.option("--bind", default="0.0.0.0:8080")
@click.option("-i", "--interactive", is_flag=True)
@click.argument("scripts", nargs=-1)
def server(capem, certfile, keyfile, bind, interactive, scripts):
    if not (bool(certfile) == bool(keyfile) == bool(capem)):  # all or none
print(
"For SSL auth 'capem', 'certfile' and 'keyfile' "
"should be provided."
)
sys.exit(1)
loop = asyncio.get_event_loop()
print(f"[*] Listen PyNow! at: {bind}")
loop.run_until_complete(
start_kapow_server(bind, scripts, capem, certfile, interactive, keyfile))
loop.run_forever()
@kapow.group(help="Manage current server HTTP routes")
def route():
pass
@route.command("add")
@click.option("-c", "--command", nargs=1)
@click.option("-e", "--entrypoint", default="/bin/sh -c")
@click.option("-X", "--method", default="GET")
@click.option("--url", envvar='KAPOW_URL')
@click.argument("url_pattern", nargs=1)
@click.argument("command_file", required=False)
def route_add(url_pattern, entrypoint, command, method, url, command_file):
if command:
# Command is given inline
source = command
elif command_file is None:
# No command
source = ""
elif command_file == '-':
# Read commands from stdin
source = sys.stdin.read()
else:
# Read commands from a file
with open(command_file, 'r', encoding='utf-8') as handler:
source = handler.read()
response = requests.post(f"{url}/routes",
json={"method": method,
"url_pattern": url_pattern,
"entrypoint": entrypoint,
"command": source})
response.raise_for_status()
print(json.dumps(response.json(), indent=2))
@route.command("remove")
@click.option("--url", envvar='KAPOW_URL')
@click.argument("route-id")
def route_remove(route_id, url):
response = requests.delete(f"{url}/routes/{route_id}")
response.raise_for_status()
@route.command("list")
@click.option("--url", envvar='KAPOW_URL')
@click.argument("route-id", nargs=1, required=False, default=None)
def route_list(route_id, url):
if route_id is None:
response = requests.get(f"{url}/routes")
else:
response = requests.get(f"{url}/routes/{route_id}")
response.raise_for_status()
print(json.dumps(response.json(), indent=2))
@kapow.command("set", help="Set data from the current context")
@click.option("--url", envvar='KAPOW_URL')
@click.option("--handler-id", envvar='KAPOW_HANDLER_ID')
@click.argument("path", nargs=1)
@click.argument("value", required=False)
def kapow_set(url, handler_id, path, value):
if value is None:
data = sys.stdin.buffer
else:
data = value.encode('utf-8')
try:
response = requests.put(f"{url}/handlers/{handler_id}{path}",
data=data)
except requests.exceptions.ConnectionError:
return False
else:
response.raise_for_status()
@kapow.command("get", help="Get data from the current context")
@click.option("--url", envvar='KAPOW_URL')
@click.option("--handler-id", envvar='KAPOW_HANDLER_ID')
@click.argument("path", nargs=1)
def kapow_get(url, handler_id, path):
try:
response = requests.get(f"{url}/handlers/{handler_id}{path}",
stream=True)
except requests.exceptions.ConnectionError:
return False
else:
for chunk in response.iter_content(chunk_size=None):
sys.stdout.buffer.write(chunk)
if __name__ == '__main__':
kapow()
# ========================= main2.py (repo: RadarSun/SUSTech-EC2021-A3) =========================
import numpy as np
from cec2013.cec2013 import *
from modules2 import *
import argparse
import time
def run(func_id):
f = CEC2013(func_id)
D = f.get_dimension()
pop_size = 16 * D
archive = np.empty((0, D))
evaluation_remaind_cnt = f.get_maxfes()
taboo_points = np.empty((0, D))
radius_of_taboo_points = np.array([])
while evaluation_remaind_cnt > 0:
population = initialization(f, 4 * pop_size, taboo_points, radius_of_taboo_points)
evaluation_remaind_cnt -= population.shape[0]
score = np.zeros(population.shape[0])
for i in range(population.shape[0]):
score[i] = f.evaluate(population[i])
id_sorted_pop = np.argsort(score)[::-1]
        population = population[id_sorted_pop[:1 * pop_size]]  # decrease to the pop_size best individuals
tmp_taboo_point = np.empty((0, D)) # use to store the best individuals
tmp_radius_of_taboo_points = np.array([])
start_time = time.time()*1000
for individual in population:
new_population, evaluation_remaind_cnt = cmsa(np.reshape(individual, (-1, D)), evaluation_remaind_cnt, f, np.ones(1) * f.evaluate(individual),
taboo_points, radius_of_taboo_points)
radius_of_this_taboo = np.linalg.norm(new_population[0] - individual, 2)
tmp_radius_of_taboo_points = np.hstack((tmp_radius_of_taboo_points, radius_of_this_taboo))
tmp_taboo_point = np.vstack((tmp_taboo_point, new_population[0]))
archive, evaluation_remaind_cnt = union(archive, new_population, f, evaluation_remaind_cnt, start_time)
taboo_points = tmp_taboo_point
radius_of_taboo_points = tmp_radius_of_taboo_points
return archive
def experiment():
runcnt = 50
problemcnt = 20+1
recall1 = np.zeros((problemcnt-1,runcnt))
recall2 = np.zeros((problemcnt-1,runcnt))
recall3 = np.zeros((problemcnt-1,runcnt))
recall4 = np.zeros((problemcnt-1,runcnt))
recall5 = np.zeros((problemcnt-1,runcnt))
precison1 = np.zeros((problemcnt-1,runcnt))
precison2 = np.zeros((problemcnt-1,runcnt))
precison3 = np.zeros((problemcnt-1,runcnt))
precison4 = np.zeros((problemcnt-1,runcnt))
precison5 = np.zeros((problemcnt-1,runcnt))
f1_stasic1 = np.zeros((problemcnt-1,runcnt))
f1_stasic2 = np.zeros((problemcnt-1,runcnt))
f1_stasic3 = np.zeros((problemcnt-1,runcnt))
f1_stasic4 = np.zeros((problemcnt-1,runcnt))
f1_stasic5 = np.zeros((problemcnt-1,runcnt))
for exp in range(runcnt):
for func_id in range(1, problemcnt):
            print('Problem', func_id, 'RUN', exp)
# ans = main4(func_id)
ans = run(func_id)
recall,precison,f1_stasic = print_result(ans[:, :-2], CEC2013(func_id),func_id,exp)
recall1[func_id-1][exp] = recall[0]
recall2[func_id-1][exp] = recall[1]
recall3[func_id-1][exp] = recall[2]
recall4[func_id-1][exp] = recall[3]
recall5[func_id-1][exp] = recall[4]
precison1[func_id-1][exp] = precison[0]
precison2[func_id-1][exp] = precison[1]
precison3[func_id-1][exp] = precison[2]
precison4[func_id-1][exp] = precison[3]
precison5[func_id-1][exp] = precison[4]
            f1_stasic1[func_id-1][exp] = f1_stasic[0]
            f1_stasic2[func_id-1][exp] = f1_stasic[1]
            f1_stasic3[func_id-1][exp] = f1_stasic[2]
            f1_stasic4[func_id-1][exp] = f1_stasic[3]
            f1_stasic5[func_id-1][exp] = f1_stasic[4]
score = np.zeros(ans.shape[0])
f = CEC2013(func_id)
characters1 = np.empty([ans.shape[0],1], dtype = str)
characters2 = np.empty([ans.shape[0],1], dtype = str)
for i in range(ans.shape[0]):
score[i] = f.evaluate(ans[i, :-2])
characters1[i,0] = '='
characters2[i,0] = '@'
adds = np.ones(ans.shape[0],dtype= 'int8')
anss = np.hstack((ans[:,:-2], characters1.reshape(ans.shape[0], 1), score.reshape(ans.shape[0], 1),\
characters2.reshape(ans.shape[0], 1), ans[:,-2].reshape(ans.shape[0], 1), ans[:,-1].reshape(ans.shape[0], 1), adds.reshape(ans.shape[0],1)))
np.savetxt('./results/' + 'problem%03d' % (func_id) + 'run%03d' % (exp+1) + '.dat', anss,fmt = '%s')
# save data to csv
import pandas as pd
for func_id in range(1, problemcnt):
recordeddata = np.zeros((runcnt,3*5))
recordeddata[:,0] = recall1[func_id-1][:]
recordeddata[:,1] = recall2[func_id-1][:]
recordeddata[:,2] = recall3[func_id-1][:]
recordeddata[:,3] = recall4[func_id-1][:]
recordeddata[:,4] = recall5[func_id-1][:]
recordeddata[:,5] = precison1[func_id-1][:]
recordeddata[:,6] = precison2[func_id-1][:]
recordeddata[:,7] = precison3[func_id-1][:]
recordeddata[:,8] = precison4[func_id-1][:]
recordeddata[:,9] = precison5[func_id-1][:]
recordeddata[:,10] = f1_stasic1[func_id-1][:]
recordeddata[:,11] = f1_stasic2[func_id-1][:]
recordeddata[:,12] = f1_stasic3[func_id-1][:]
recordeddata[:,13] = f1_stasic4[func_id-1][:]
recordeddata[:,14] = f1_stasic5[func_id-1][:]
datafram_recordeddata = pd.DataFrame(recordeddata)
        record_filename = "./recorded_data/fuction" + str(func_id) + ".csv"
        datafram_recordeddata.to_csv(record_filename)
# save data to txt
import sys
newfile = 'result.txt'
data = open(newfile,'a',encoding="utf-8")
for func_id in range(1, problemcnt):
strproblem = "+Problem " + str(func_id)
print(strproblem,file=data)
strrecall = "recall: " + str(recall1[func_id-1][:]) + " mean: " + str(np.mean(recall1[func_id-1][:])) + " var: " + str(np.var(recall1[func_id-1][:]))
print(strrecall,file=data)
strrecall = "recall: " + str(recall2[func_id-1][:]) + " mean: " + str(np.mean(recall2[func_id-1][:])) + " var: " + str(np.var(recall2[func_id-1][:]))
print(strrecall,file=data)
strrecall = "recall: " + str(recall3[func_id-1][:]) + " mean: " + str(np.mean(recall3[func_id-1][:])) + " var: " + str(np.var(recall3[func_id-1][:]))
print(strrecall,file=data)
strrecall = "recall: " + str(recall4[func_id-1][:]) + " mean: " + str(np.mean(recall4[func_id-1][:])) + " var: " + str(np.var(recall4[func_id-1][:]))
print(strrecall,file=data)
strrecall = "recall: " + str(recall5[func_id-1][:]) + " mean: " + str(np.mean(recall5[func_id-1][:])) + " var: " + str(np.var(recall5[func_id-1][:]))
print(strrecall,file=data)
runstr = "-------------------"
print(runstr,file=data)
strprecision = "precision: " + str(precison1[func_id-1][:]) + " mean: " + str(np.mean(precison1[func_id-1][:])) + " var: " + str(np.var(precison1[func_id-1][:]))
print(strprecision,file=data)
strprecision = "precision: " + str(precison2[func_id-1][:]) + " mean: " + str(np.mean(precison2[func_id-1][:])) + " var: " + str(np.var(precison2[func_id-1][:]))
print(strprecision,file=data)
strprecision = "precision: " + str(precison3[func_id-1][:]) + " mean: " + str(np.mean(precison3[func_id-1][:])) + " var: " + str(np.var(precison3[func_id-1][:]))
print(strprecision,file=data)
strprecision = "precision: " + str(precison4[func_id-1][:]) + " mean: " + str(np.mean(precison4[func_id-1][:])) + " var: " + str(np.var(precison4[func_id-1][:]))
print(strprecision,file=data)
strprecision = "precision: " + str(precison5[func_id-1][:]) + " mean: " + str(np.mean(precison5[func_id-1][:])) + " var: " + str(np.var(precison5[func_id-1][:]))
print(strprecision,file=data)
runstr = "-------------------"
print(runstr,file=data)
strf1 = "f1_stastic " + str(f1_stasic1[func_id-1][:]) + " mean: " + str(np.mean(f1_stasic1[func_id-1][:])) + " var: " + str(np.var(f1_stasic1[func_id-1][:]))
print(strf1,file=data)
strf1 = "f1_stastic " + str(f1_stasic2[func_id-1][:]) + " mean: " + str(np.mean(f1_stasic2[func_id-1][:])) + " var: " + str(np.var(f1_stasic2[func_id-1][:]))
print(strf1,file=data)
strf1 = "f1_stastic " + str(f1_stasic3[func_id-1][:]) + " mean: " + str(np.mean(f1_stasic3[func_id-1][:])) + " var: " + str(np.var(f1_stasic3[func_id-1][:]))
print(strf1,file=data)
strf1 = "f1_stastic " + str(f1_stasic4[func_id-1][:]) + " mean: " + str(np.mean(f1_stasic4[func_id-1][:])) + " var: " + str(np.var(f1_stasic4[func_id-1][:]))
print(strf1,file=data)
strf1 = "f1_stastic " + str(f1_stasic5[func_id-1][:]) + " mean: " + str(np.mean(f1_stasic5[func_id-1][:])) + " var: " + str(np.var(f1_stasic5[func_id-1][:]))
print(strf1,file=data)
runstr = "\n"
print(runstr,file=data)
if __name__ == '__main__':
experiment()
# parse = argparse.ArgumentParser()
# parse.add_argument('--func_id',default=1,type=int)
# args = parse.parse_args()
# func_id = args.func_id
# func_id = 3
# ans = run(func_id)
# # see_result(ans[:, :-1], CEC2013(func_id))
# print_result(ans[:, :-1], CEC2013(func_id),1,2)
# np.savetxt('points.txt', ans)
# ========================= next file (repo: ajstewart/tkp) =========================
import logging
import tkp.db
import tkp.db
import tkp.db.quality as dbquality
import tkp.quality.brightsource
from tkp.quality.rms import rms_invalid, rms_with_clipped_subregion
from tkp.telescope.lofar.noise import noise_level
from tkp.utility import nice_format
logger = logging.getLogger(__name__)
def reject_check_lofar(accessor, job_config):
lofar_quality_params = job_config['quality_lofar']
low_bound = lofar_quality_params['low_bound']
high_bound = lofar_quality_params['high_bound']
oversampled_x = lofar_quality_params['oversampled_x']
elliptical_x = lofar_quality_params['elliptical_x']
min_separation = lofar_quality_params['min_separation']
if accessor.tau_time == 0:
logger.info("image %s REJECTED: tau_time is 0, should be > 0" % accessor.url)
return dbquality.reject_reasons['tau_time'], "tau_time is 0"
rms_est_sigma = job_config.persistence.rms_est_sigma
rms_est_fraction = job_config.persistence.rms_est_fraction
rms_qc = rms_with_clipped_subregion(accessor.data,
rms_est_sigma=rms_est_sigma,
rms_est_fraction=rms_est_fraction)
noise = noise_level(accessor.freq_eff, accessor.freq_bw, accessor.tau_time,
accessor.antenna_set, accessor.ncore, accessor.nremote, accessor.nintl
)
rms_check = rms_invalid(rms_qc, noise, low_bound, high_bound)
if not rms_check:
logger.info("image %s accepted: rms: %s, theoretical noise: %s" % \
(accessor.url, nice_format(rms_qc),
nice_format(noise)))
else:
logger.info("image %s REJECTED: %s " % (accessor.url, rms_check))
return (dbquality.reject_reasons['rms'], rms_check)
# beam shape check
(semimaj, semimin, theta) = accessor.beam
beam_check = beam_invalid(semimaj, semimin, theta, oversampled_x, elliptical_x)
if not beam_check:
logger.info("image %s accepted: semimaj: %s, semimin: %s" % (accessor.url,
nice_format(semimaj),
nice_format(semimin)))
else:
logger.info("image %s REJECTED: %s " % (accessor.url, beam_check))
return (dbquality.reject_reasons['beam'], beam_check)
# Bright source check
bright = tkp.quality.brightsource.is_bright_source_near(accessor, min_separation)
if bright:
logger.info("image %s REJECTED: %s " % (accessor.url, bright))
return (dbquality.reject_reasons['bright_source'], bright)
#!/usr/bin/env python
#pylint: skip-file
from __future__ import print_function
import itertools
import pytest
import types
from .util import RandomComplexString, RandomIP
class Test_PreferenceFlags(object):
def test_DecodeFlags(self):
print()
from pyirobot import CarpetBoost, CleaningPasses, FinishWhenBinFull, EdgeClean, Robot
robot = Robot(RandomIP(), RandomComplexString(64))
# Fake the robot post call
def fake_post(self, cmd, args):
return {
"lang" : 0,
"name" : "Roomba",
"timezone" : "America/Chicago",
"flags" : sum([field.value for field in self._fake_preferences])
}
setattr(robot, "_PostToRobot", types.MethodType(fake_post, robot))
# Test decoding every combination of options
combinations = list(itertools.product(list(CarpetBoost)[1:], list(CleaningPasses)[1:], list(FinishWhenBinFull)[1:], list(EdgeClean)[1:]))
for combo in combinations:
setattr(robot, "_fake_preferences", combo)
print("combo={}".format(combo))
prefs = robot.GetCleaningPreferences()
print("prefs={}".format(prefs))
assert prefs["carpetBoost"] == combo[0]
assert prefs["cleaningPasses"] == combo[1]
assert prefs["finishWhenBinFull"] == combo[2]
assert prefs["edgeClean"] == combo[3]
# ========================= next file (repo: easyCZ/UoE-Projects) =========================
#!/usr/bin/python
# aggregate-combiner.py
import sys
answers = []
last_user_id = None
MEM_THRESHOLD = 1024 * 1024 * 1024 # 1 GB
def write(user, entries):
if len(entries) > 0:
print('{0}\t{1}'.format(user, ','.join(entries)))
for line in sys.stdin:
user_id, answer_id = line.strip().split('\t', 1)
if (last_user_id and last_user_id != user_id) or sys.getsizeof(answers) > MEM_THRESHOLD:
write(last_user_id, answers)
answers = []
last_user_id = user_id
answers.append(answer_id)
if last_user_id:
write(last_user_id, answers)
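# Worked example (illustration only, assuming the mapper emits tab-separated
# "user_id<TAB>answer_id" lines already grouped/sorted by user_id):
#
#   stdin:              stdout:
#     42    a1            42    a1,a2
#     42    a2            7     a9
#     7     a9
#
# Note that the buffer is also flushed early when its size estimate exceeds
# MEM_THRESHOLD, which can emit more than one line for the same user; a
# downstream reducer would then merge those partial lines.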
# ========================= next file =========================
import logging
import os
from galaxy.model.orm import and_
from tool_shed.util import hg_util
from tool_shed.util import shed_util_common as suc
log = logging.getLogger( __name__ )
class ToolVersionManager( object ):
def __init__( self, app ):
self.app = app
def get_tool_version( self, tool_id ):
context = self.app.install_model.context
return context.query( self.app.install_model.ToolVersion ) \
.filter( self.app.install_model.ToolVersion.table.c.tool_id == tool_id ) \
.first()
def get_tool_version_association( self, parent_tool_version, tool_version ):
"""
        Return a ToolVersionAssociation if one exists that associates the two
        received tool_versions. This function is called only from Galaxy.
"""
context = self.app.install_model.context
return context.query( self.app.install_model.ToolVersionAssociation ) \
.filter( and_( self.app.install_model.ToolVersionAssociation.table.c.parent_id == parent_tool_version.id,
self.app.install_model.ToolVersionAssociation.table.c.tool_id == tool_version.id ) ) \
.first()
def get_version_lineage_for_tool( self, repository_id, repository_metadata, guid ):
"""
Return the tool version lineage chain in descendant order for the received
        guid contained in the received repository_metadata.tool_versions. This function
is called only from the Tool Shed.
"""
repository = suc.get_repository_by_id( self.app, repository_id )
repo = hg_util.get_repo_for_repository( self.app, repository=repository, repo_path=None, create=False )
# Initialize the tool lineage
version_lineage = [ guid ]
# Get all ancestor guids of the received guid.
current_child_guid = guid
for changeset in hg_util.reversed_upper_bounded_changelog( repo, repository_metadata.changeset_revision ):
ctx = repo.changectx( changeset )
rm = suc.get_repository_metadata_by_changeset_revision( self.app, repository_id, str( ctx ) )
if rm:
parent_guid = rm.tool_versions.get( current_child_guid, None )
if parent_guid:
version_lineage.append( parent_guid )
current_child_guid = parent_guid
# Get all descendant guids of the received guid.
current_parent_guid = guid
for changeset in hg_util.reversed_lower_upper_bounded_changelog( repo,
repository_metadata.changeset_revision,
repository.tip( self.app ) ):
ctx = repo.changectx( changeset )
rm = suc.get_repository_metadata_by_changeset_revision( self.app, repository_id, str( ctx ) )
if rm:
tool_versions = rm.tool_versions
for child_guid, parent_guid in tool_versions.items():
if parent_guid == current_parent_guid:
version_lineage.insert( 0, child_guid )
current_parent_guid = child_guid
break
return version_lineage
def handle_tool_versions( self, tool_version_dicts, tool_shed_repository ):
"""
Using the list of tool_version_dicts retrieved from the Tool Shed (one per changeset
revision up to the currently installed changeset revision), create the parent / child
pairs of tool versions. Each dictionary contains { tool id : parent tool id } pairs.
This function is called only from Galaxy.
"""
context = self.app.install_model.context
for tool_version_dict in tool_version_dicts:
for tool_guid, parent_id in tool_version_dict.items():
tool_version_using_tool_guid = self.get_tool_version( tool_guid )
tool_version_using_parent_id = self.get_tool_version( parent_id )
if not tool_version_using_tool_guid:
tool_version_using_tool_guid = \
self.app.install_model.ToolVersion( tool_id=tool_guid,
tool_shed_repository=tool_shed_repository )
context.add( tool_version_using_tool_guid )
context.flush()
if not tool_version_using_parent_id:
tool_version_using_parent_id = \
self.app.install_model.ToolVersion( tool_id=parent_id,
tool_shed_repository=tool_shed_repository )
context.add( tool_version_using_parent_id )
context.flush()
tool_version_association = \
self.get_tool_version_association( tool_version_using_parent_id,
tool_version_using_tool_guid )
if not tool_version_association:
# Associate the two versions as parent / child.
tool_version_association = \
self.app.install_model.ToolVersionAssociation( tool_id=tool_version_using_tool_guid.id,
parent_id=tool_version_using_parent_id.id )
context.add( tool_version_association )
context.flush()
# -*- coding: utf-8 -*-
"""
Unit tests for dim transforms
"""
from __future__ import division
import numpy as np
from holoviews.core.data import Dataset
from holoviews.element.comparison import ComparisonTestCase
from holoviews.util.transform import dim
class TestDimTransforms(ComparisonTestCase):
def setUp(self):
self.linear_ints = np.arange(1, 11)
self.linear_floats = np.arange(1, 11)/10.
self.negative = -self.linear_floats
self.repeating = ['A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C', 'A']
self.dataset = Dataset(
(self.linear_ints, self.linear_floats, self.negative, self.repeating),
['int', 'float', 'negative', 'categories']
)
# Unary operators
def test_abs_transform(self):
self.assertEqual(abs(dim('negative')).apply(self.dataset), self.linear_floats)
def test_neg_transform(self):
self.assertEqual(-dim('negative').apply(self.dataset), self.linear_floats)
# Binary operators
def test_add_transform(self):
self.assertEqual((dim('float')+1).apply(self.dataset), self.linear_floats+1)
def test_div_transform(self):
self.assertEqual((dim('int')/10.).apply(self.dataset), self.linear_floats)
def test_floor_div_transform(self):
self.assertEqual((dim('int')//2).apply(self.dataset), self.linear_ints//2)
def test_mod_transform(self):
self.assertEqual((dim('int')%2).apply(self.dataset), self.linear_ints%2)
def test_mul_transform(self):
self.assertEqual((dim('float')*10.).apply(self.dataset), self.linear_ints)
def test_pow_transform(self):
self.assertEqual((dim('int')**2).apply(self.dataset), self.linear_ints**2)
def test_sub_transform(self):
self.assertEqual((dim('int')-10).apply(self.dataset), self.linear_ints-10)
# Reverse binary operators
def test_radd_transform(self):
self.assertEqual((1+dim('float')).apply(self.dataset), 1+self.linear_floats)
def test_rdiv_transform(self):
self.assertEqual((10./dim('int')).apply(self.dataset), 10./self.linear_ints)
def test_rfloor_div_transform(self):
self.assertEqual((2//dim('int')).apply(self.dataset), 2//self.linear_ints)
def test_rmod_transform(self):
self.assertEqual((2%dim('int')).apply(self.dataset), 2%self.linear_ints)
def test_rmul_transform(self):
self.assertEqual((10.*dim('float')).apply(self.dataset), self.linear_ints)
def test_rsub_transform(self):
self.assertEqual((10-dim('int')).apply(self.dataset), 10-self.linear_ints)
# NumPy operations
def test_ufunc_transform(self):
self.assertEqual(np.sin(dim('float')).apply(self.dataset), np.sin(self.linear_floats))
def test_astype_transform(self):
self.assertEqual(dim('int').astype(str).apply(self.dataset),
self.linear_ints.astype(str))
def test_cumsum_transform(self):
self.assertEqual(dim('float').cumsum().apply(self.dataset),
self.linear_floats.cumsum())
def test_max_transform(self):
self.assertEqual(dim('float').max().apply(self.dataset),
self.linear_floats.max())
def test_min_transform(self):
self.assertEqual(dim('float').min().apply(self.dataset),
self.linear_floats.min())
def test_round_transform(self):
self.assertEqual(dim('float').round().apply(self.dataset),
self.linear_floats.round())
def test_sum_transform(self):
self.assertEqual(dim('float').sum().apply(self.dataset),
self.linear_floats.sum())
def test_std_transform(self):
self.assertEqual(dim('float').std().apply(self.dataset),
self.linear_floats.std())
def test_var_transform(self):
self.assertEqual(dim('float').var().apply(self.dataset),
self.linear_floats.var())
# Custom functions
def test_norm_transform(self):
self.assertEqual(dim('int').norm().apply(self.dataset),
(self.linear_ints-1)/9.)
def test_bin_transform(self):
self.assertEqual(dim('int').bin([0, 5, 10]).apply(self.dataset),
np.array([2.5, 2.5, 2.5, 2.5, 2.5, 7.5, 7.5, 7.5, 7.5, 7.5]))
def test_bin_transform_with_labels(self):
self.assertEqual(dim('int').bin([0, 5, 10], ['A', 'B']).apply(self.dataset),
np.array(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B']))
def test_categorize_transform_list(self):
self.assertEqual(dim('categories').categorize(['circle', 'square', 'triangle']).apply(self.dataset),
np.array((['circle', 'square', 'triangle']*3)+['circle']))
def test_categorize_transform_dict(self):
self.assertEqual(dim('categories').categorize({'A': 'circle', 'B': 'square', 'C': 'triangle'}).apply(self.dataset),
np.array((['circle', 'square', 'triangle']*3)+['circle']))
def test_categorize_transform_dict_with_default(self):
self.assertEqual(dim('categories').categorize({'A': 'circle', 'B': 'square'}, default='triangle').apply(self.dataset),
np.array((['circle', 'square', 'triangle']*3)+['circle']))
# Complex expressions
def test_multi_operator_expression(self):
self.assertEqual((((dim('float')-2)*3)**2).apply(self.dataset),
((self.linear_floats-2)*3)**2)
def test_multi_dim_expression(self):
self.assertEqual((dim('int')-dim('float')).apply(self.dataset),
self.linear_ints-self.linear_floats)
# Repr method
def test_dim_repr(self):
self.assertEqual(repr(dim('float')), "'float'")
def test_unary_op_repr(self):
self.assertEqual(repr(-dim('float')), "-dim('float')")
def test_binary_op_repr(self):
self.assertEqual(repr(dim('float')*2), "dim('float')*2")
def test_reverse_binary_op_repr(self):
self.assertEqual(repr(1+dim('float')), "1+dim('float')")
def test_ufunc_expression_repr(self):
self.assertEqual(repr(np.log(dim('float'))), "np.log(dim('float'))")
def test_custom_func_repr(self):
self.assertEqual(repr(dim('float').norm()), "dim('float').norm()")
def test_multi_operator_expression_repr(self):
self.assertEqual(repr(((dim('float')-2)*3)**2),
"((dim('float')-2)*3)**2")
# Applies method
def test_multi_dim_expression_applies(self):
self.assertEqual((dim('int')-dim('float')).applies(self.dataset),
True)
def test_multi_dim_expression_not_applies(self):
self.assertEqual((dim('foo')-dim('bar')).applies(self.dataset),
False)
def test_multi_dim_expression_partial_applies(self):
self.assertEqual((dim('int')-dim('bar')).applies(self.dataset),
False)
# ========================= django_photo_gallery/app/migrations/0008_auto_20180820_2032.py =========================
# Generated by Django 2.0.4 on 2018-08-20 20:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0007_auto_20180817_1423'),
]
operations = [
migrations.AddField(
model_name='fooditem',
name='price2',
field=models.DecimalField(decimal_places=2, max_digits=8, null=True),
),
migrations.AddField(
model_name='fooditem',
name='price3',
field=models.DecimalField(decimal_places=2, max_digits=8, null=True),
),
migrations.AlterField(
model_name='foodcategory',
name='description',
field=models.TextField(max_length=1024, null=True),
),
migrations.AlterField(
model_name='fooditem',
name='description',
field=models.TextField(blank=True, max_length=1024),
),
]
# -*- coding: utf-8 -*-
from util import Log
def remove(absFilePath, realPath = ''):
with open(absFilePath, "rb") as tmpFile:
content = tmpFile.read()
if len(content) < 3:
return 0
if content[0:3] == b'\xef\xbb\xbf':
content = content[3:]
with open(absFilePath, "wb+") as tmpFile:
tmpFile.write(content)
if realPath != '':
Log.printDetailln("[Bom] removed: %s" % (realPath))
return 0
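# Hypothetical usage sketch (defined but not invoked here): strip a UTF-8 BOM
# from every .py file under a directory. The directory path is a made-up
# placeholder.
def _example_strip_boms(root_dir="./src"):
    import os
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        for filename in filenames:
            if filename.endswith(".py"):
                abs_path = os.path.join(dirpath, filename)
                remove(abs_path, realPath=abs_path)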
# ========================= next file =========================
import os
from collections import defaultdict
"""
Run from actual_data directory to get the average runtimes for each actual data test
"""
files_for_avg = 3
valid_strs = ["data{0}.acp".format(i) for i in range(1, files_for_avg + 1)]
def is_file_path_valid_str(path):
for valid_str in valid_strs:
try:
idx = path.index(valid_str)
return path[0:idx]
except:
pass
return None
for ref_path in os.listdir('./'):
try:
runtimes = defaultdict(float)
for file_path in os.listdir('./' + ref_path + '/igenomics_data'):
complete_file_path = './' + ref_path + '/igenomics_data/' + file_path
prefix_str = is_file_path_valid_str(complete_file_path)
if prefix_str is not None:
with open(complete_file_path) as my_file:
for line_num, line in enumerate(my_file.readlines()):
if line_num == 1:
line_comps = line.split('\t')
runtime = float(line_comps[2])
runtimes[prefix_str] += runtime
break
for key in runtimes.keys():
print("{0}: {1}".format(key, round(runtimes[key] / files_for_avg,
2)))
except Exception:
pass
# ========================= unit.py =========================
import database as d
import jobs as j
import incubator as inc
import bigData as big
import staff
import copy
import math
#Not all units, but all units you can create (no house, church)
def all_units():
unit_list = [Farm, Mill, Brewery, Bakery, Lumberyard, Joinery]
return unit_list
#units use Business's money
class Unit(object):
unitType = "genericUnit"
character = "X"
locality = None #should be a locality
location = () #(x,y), being indices on the localMap.
business = None
stock = None
output = None
def __init__ (self, unitName, unitLocality, unitLocationTuple, business):
self.name = unitName
self.bigdata = big.bigdata(self)
stockLength = range(len(d.getMaterials()))
missionsLength = range(len(d.getUnitMissions()))
self.locality = unitLocality
self.location = unitLocationTuple
self.business = business
self.jobList = []
self.incubator = inc.incubator(self)
self.bigdata = big.bigdata(self)
self.stock = [0 for material in stockLength]
self.output = [0 for material in stockLength]
self.tech = [1 for material in stockLength]
#current prices
self.price = [0 for material in stockLength]
self.purchases = [0 for material in stockLength]
#yesterday's number of sales for each item
self.sales = [0 for material in stockLength]
self.failSales = [0 for material in stockLength]
self.transports = [0 for material in stockLength]
self.failTransports = [0 for material in stockLength]
#Direct Materials Cost of a SINGLE instance of each product
self.DMC = [0 for material in stockLength]
# self.orders = [0 for material in stockLength]
self.crafted = [0 for material in stockLength]
self.planted = [0 for material in stockLength]
self.harvested = [0 for material in stockLength]
self.missions = [False for mission in missionsLength]
self.can_make = [False for material in stockLength]
self.laborTotal = 0
# self.rentTotal = 0
self.customers = []
def toString(self):
print("------------------------------------------")
print(self.name + " is a " + self.unitType + ".")
print("\nCurrent stocks:")
print("Stock:", self.stock)
print("Output:", self.output)
print("\nCurrent crafted:")
print("Crafted: ", self.crafted)
print("Planted: ", self.planted)
print("\nCurrent prices:")
print("Direct material costs:", self.DMC)
print("Last week's labor costs:", self.laborTotal)
print("Sales:", self.sales)
print("Failsales: ", self.failSales)
print("Demand: ", [self.sales[i] + self.failSales[i] for i in range(len(self.sales))])
print("Prices:", self.price)
def getPrice(self):
return self.price
def complain(self, who):
failSale = [0 for i in range(len(d.getMaterials()))]
cost = 0
self.customers.append((who, failSale, copy.copy(self.output), cost, who.capital, False))
def sell(self, who, amounts):
wishamounts = copy.copy(amounts)
wishcost = sum(self.price[i] * wishamounts[i] for i in range(len(self.output)))
for i in range(len(self.output)):
if amounts[i] > self.output[i]:
amounts[i] = math.floor(self.output[i])
cost = sum(self.price[i] * amounts[i] for i in range(len(self.output)))
#verify
if who.canAfford(cost):
#sale
who.addCapital(-cost)
self.business.addCash(cost)
for i in range(len(self.output)):
self.addOutput(i, -amounts[i])
who.addInventory(i, amounts[i])
self.addSales(i, amounts[i])
self.addFailSales(i, wishamounts[i] - amounts[i])
if sum(amounts) > 0:
sold = True
else:
sold = False
#customers is for bigData- cleared during *PHASE*
self.customers.append((who, wishamounts, wishcost, copy.copy(amounts), cost, copy.copy(self.output), who.capital, sold))
return (amounts, cost, sold)
#calculate the price of a single material, without changing it in place- i = materialIndex
def priceCalc(self, i):
#natural rate of profit- 4% return on capital
nrp = 1.04
#K is a constant weight- adjust if needed. At .5 one period is the half life.
K = .5
#natural price
if d.getMaterials()[i] in d.planted:
#2 days, tech same for planting and harvesting
ratio = self.incubator.ratios[d.getMaterials()[i]]
labor = 2 / (self.tech[i] * ratio)
else:
labor = 1 / self.tech[i]
naturalPrice = (self.DMC[i] + labor) * nrp
#if never sold before
if self.price[i] == 0:
price = round(naturalPrice, 2)
oPrice = price
#if sold before
else:
#optimal price, set price.
demand = self.sales[i] + self.failSales[i]
oPrice = (demand / self.output[i]) * naturalPrice
priceAdjustment = (K * (oPrice - self.price[i]))
price = round(self.price[i] + priceAdjustment, 2)
return (price, oPrice, naturalPrice)
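    # Worked example of priceCalc (illustrative numbers only): for a crafted
    # good with DMC = 2.0 and tech = 1, labor = 1/1 = 1, so the natural price
    # is (2.0 + 1) * 1.04 = 3.12. If the current price is 3.00, demand
    # (sales + failSales) is 40 and output is 20, then
    # oPrice = (40 / 20) * 3.12 = 6.24 and the new price is
    # 3.00 + 0.5 * (6.24 - 3.00) = 4.62.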
#call AFTER transferring but BEFORE transporting. For stores, after restocking and before selling. (rest)
#priceGen gives new prices every period for each item based on the earlier price and
#the "optimal price" that it should trend towards.
def priceGen(self):
yDayNum = self.getDayNum() - 1
oPrice = [0 for i in d.getMaterials()]
naturalPrice = [0 for i in d.getMaterials()]
for i in range(len(self.price)):
if self.output[i] != 0:
(self.price[i], oPrice[i], naturalPrice[i]) = self.priceCalc(i)
#debug
# if self.name in ("<NAME>", "<NAME>", "<NAME>"):
# print(self.name)
# print("DMC: ", self.DMC)
# print("sales: ", self.sales)
# print("output: ", self.output)
# print("nPrice: ", naturalPrice)
# print("oPrice: ", oPrice)
# print("price: ", self.price)
# print("")
self.resetSales()
self.resetCustomers()
def growingPlants(self, materialIndex):
return self.incubator.getGrowing(d.getMaterials()[materialIndex])
def ripePlants(self, materialIndex):
return self.incubator.getRipe(d.getMaterials()[materialIndex])
def plantSeeds(self, materialIndex, amount):
# if self.stock[materialIndex] <= amount:
# amount = self.stock[materialIndex]
# self.stock[materialIndex] -= amount
self.incubator.plant(d.getMaterials()[materialIndex], amount)
def harvest(self, materialIndex, amount):
if self.ripePlants(materialIndex) >= amount:
amount = self.incubator.harvest(d.getMaterials()[materialIndex], amount)
self.addCrafted(materialIndex, amount)
self.addStock(materialIndex, amount)
def getName(self):
return self.name
def getEmployees(self):
employeeList = []
for job in self.jobList:
employeeList += job.getEmployees()
return employeeList
def get_emp_dict(self):
empDict = {}
for job in self.jobList:
empDict[job] = job.getEmployees()
return empDict
def getLocality(self):
return self.locality
def getLocation(self):
return self.location
def getBusiness(self):
return self.business
def getDayNum(self):
return self.locality.getDayNum()
def getDMC(self):
return self.DMC
# def getIsMarket(self):
# return self.isMarket
#for now, skills don't matter. But they will.
def getJobs(self, interviewee):
jobList = copy.copy(self.jobList)
return jobList
def getJobList(self):
return self.jobList
def getOutput(self, materialIndex):
return (self.output[materialIndex])
def getAllOutput(self):
return self.output
def getMissions(self):
return self.missions
def getStock(self, materialIndex):
return (self.stock[materialIndex])
def getAllStock(self):
return self.stock
def getTech(self, materialIndex):
return self.tech[materialIndex]
def getUnitType(self):
return self.unitType
def setBusiness(self, newBusiness):
self.business = newBusiness
#addPurchase, addSales, addFailSales. This is stupid.
def addPurchase(self, materialIndex, amount):
self.purchases[materialIndex] += amount
def addSales(self, materialIndex, amount):
self.sales[materialIndex] += amount
def addFailSales(self, materialIndex, amount):
self.failSales[materialIndex] += amount
def addTransports(self, materialIndex, amount):
self.transports[materialIndex] += amount
def addFailTransports(self, materialIndex, amount):
self.failTransports[materialIndex] += amount
def getTotalDemand(self):
demand = []
for i in range(len(self.sales)):
demand.append(self.sales[i] + self.failSales[i] + self.transports[i] + self.failTransports[i])
return demand
def addStock(self, materialIndex, amount):
self.stock[materialIndex] += amount
def addOutput(self, materialIndex, amount):
self.output[materialIndex] += amount
def addCrafted(self, materialIndex, amount):
self.crafted[materialIndex] += amount
def addPlanted(self, materialIndex, amount):
self.planted[materialIndex] += amount
def addHarvested(self, materialIndex, amount):
self.harvested[materialIndex] += amount
def getCrafted(self):
return self.crafted
#used for displaying production in matplotlib
def getProduction(self):
return ([0,1,2,3,4,5,6,7,8], self.crafted)
def getSales(self):
return ([0,1,2,3,4,5,6,7,8], self.sales)
def addJob(self, job):
self.jobList.append(job)
def removeJob(self, job):
self.jobList.remove(job)
def setDMC(self, materialIndex, DMCost):
self.DMC[materialIndex] = DMCost
def setLaborTotal(self, laborTotal):
self.laborTotal = laborTotal
def resetPurchases(self):
self.purchases = [0 for i in self.purchases]
def resetSales(self):
self.sales = [0 for i in self.sales]
self.failSales = [0 for i in self.failSales]
self.transports = [0 for i in self.transports]
self.failTransports = [0 for i in self.failTransports]
def resetCrafted(self):
self.crafted = [0 for i in self.crafted]
def resetPlanted(self):
self.planted = [0 for i in self.planted]
def resetHarvested(self):
self.harvested = [0 for i in self.harvested]
def resetCustomers(self):
self.customers = []
def getRevenue(self):
revenue = []
for i in range(len(self.sales)):
thisRev = round(self.sales[i] * self.price[i], 2)
revenue.append(thisRev)
return revenue
def dailyRevenue(self):
materials = d.getMaterials()
revenue = self.getRevenue()
noSales = True
toString = ("\n" + self.name + " made")
for i in range(len(revenue)):
if revenue[i] != 0:
if not noSales:
toString += ","
toString += (
" $" + str(revenue[i]) +
" from " + str(self.sales[i]) +
"/" + str(self.sales[i] + self.failSales[i]) +
" sales of " + materials[i]
)
noSales = False
toString += "."
if noSales:
toString = ("\n" + self.name + " made no sales today.")
return toString
def dailyCrafted(self):
materials = d.getMaterials()
toString = ("\n" + self.name + " created")
noCrafted = True
for i in range(len(self.crafted)):
if self.crafted[i] != 0:
if not noCrafted:
toString += ","
toString += (
" " +
str(self.crafted[i]) +
" " + str(materials[i])
)
noCrafted = False
toString += "."
if noCrafted:
toString = ("\n" + self.name + " didn't craft anything today.")
return toString
def dailyExpenses(self):
pass
class Manufactury(Unit):
unitType = "Manufactury"
character = "Manu"
def __init__(self, unitName, unitLocality, unitLocationTuple, business):
Unit.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.missions[d.MANU_INDEX] = True
self.staff = staff.manu_staff(self)
class Farm(Manufactury):
unitType = "Farm"
character = "F"
def __init__(self, unitName, unitLocality, unitLocationTuple, business):
Manufactury.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.can_make[d.GRAIN_INDEX] = True
self.tech[d.GRAIN_INDEX] = 4.5
self.stock[d.GRAIN_INDEX] = 50
# self.DMC[d.GRAIN_INDEX] = 1
self.failSales[d.GRAIN_INDEX] = 500
d.addUnit(self)
if self.business is not None:
self.business.addUnit(self)
#20-30 kg flour per hour- ~440 lb per 8 hours
class Mill(Manufactury):
unitType = "Mill"
character = "M"
def __init__(self, unitName, unitLocality, unitLocationTuple, business):
Manufactury.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.can_make[d.FLOUR_INDEX] = True
self.tech[d.FLOUR_INDEX] = 440
self.missions[d.MANU_INDEX] = True
self.stock[d.GRAIN_INDEX] = 50
# self.DMC[d.GRAIN_INDEX] = 1
self.failSales[d.FLOUR_INDEX] = 500
d.addUnit(self)
if self.business is not None:
self.business.addUnit(self)
class Brewery(Manufactury):
unitType = "Brewery"
character = "b"
def __init__(self, unitName, unitLocality, unitLocationTuple, business):
Manufactury.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.can_make[d.BEER_INDEX] = True
self.tech[d.BEER_INDEX] = 40
self.missions[d.MANU_INDEX] = True
self.stock[d.GRAIN_INDEX] = 50
# self.DMC[d.GRAIN_INDEX] = 1
self.failSales[d.BEER_INDEX] = 500
d.addUnit(self)
if self.business is not None:
self.business.addUnit(self)
class Bakery(Manufactury):
unitType = "Bakery"
character = "B"
def __init__(self, unitName, unitLocality, unitLocationTuple, business):
Manufactury.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.can_make[d.BREAD_INDEX] = True
self.tech[d.BREAD_INDEX] = 60
self.missions[d.MANU_INDEX] = True
self.missions[d.STORE_INDEX] = True
self.stock[d.FLOUR_INDEX] = 50
# self.DMC[d.FLOUR_INDEX] = 1
self.failSales[d.BREAD_INDEX] = 500
d.addUnit(self)
if self.business is not None:
self.business.addUnit(self)
class Lumberyard(Manufactury):
unitType = "Lumberyard"
character = "L"
def __init__(self, unitName, unitLocality, unitLocationTuple, business):
Manufactury.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.can_make[d.WOOD_INDEX] = True
self.tech[d.WOOD_INDEX] = 50
self.missions[d.MANU_INDEX] = True
self.stock[d.WOOD_INDEX] = 50
# self.DMC[d.WOOD_INDEX] = 1
self.failSales[d.WOOD_INDEX] = 500
d.addUnit(self)
if self.business is not None:
self.business.addUnit(self)
class Joinery(Manufactury):
unitType = "Joinery"
character = "J"
def __init__(self, unitName, unitLocality, unitLocationTuple, business):
Manufactury.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.can_make[d.CHAIR_INDEX] = True
self.can_make[d.TABLE_INDEX] = True
self.tech[d.CHAIR_INDEX] = 1
self.tech[d.TABLE_INDEX] = 1
self.missions[d.MANU_INDEX] = True
self.missions[d.STORE_INDEX] = True
self.stock[d.WOOD_INDEX] = 50
# self.DMC[d.WOOD_INDEX] = 1
self.failSales[d.CHAIR_INDEX] = 500
self.failSales[d.TABLE_INDEX] = 500
d.addUnit(self)
if self.business is not None:
self.business.addUnit(self)
class House(Unit):
unitType = "Home"
character = "H"
def __init__(self, unitLocality, unitLocationTuple, business=None, unitName="House"):
Unit.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.missions[d.HOME_INDEX] = True
self.tenants = set()
d.addUnit(self)
if self.business is not None:
self.business.addUnit(self)
def addTenant(self, tenant):
self.tenants.add(tenant)
def removeTenant(self, tenant):
self.tenants.remove(tenant)
#the ai don't need warehouses, players probably do.
class Warehouse(Unit):
unitType = "Warehouse"
character = "W"
def __init__(self, unitName, unitLocality, unitLocationTuple, business):
Unit.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.missions[d.MANU_INDEX] = True
d.addUnit(self)
if self.business is not None:
self.business.addUnit(self)
class Church(Unit):
unitType = "Church"
character = "C"
def __init__(self, unitName, unitLocality, unitLocationTuple, business, religion):
Unit.__init__(self, unitName, unitLocality, unitLocationTuple, business)
self.religion = religion
self.missions[d.CHURCH_INDEX] = True
self.flock = []
self.attendance = []
self.staff = staff.church_staff(self)
# self.manager = j.Manager(business, self, 41)
d.addChurch(self)
if self.business is not None:
self.business.addUnit(self)
def attend(self, member):
self.attendance.append(member)
def getAttendance(self):
return self.attendance
def resetAttendance(self):
self.attendance = []
def getReligion(self):
return self.religion
def getFlock(self):
return self.flock
def addMember(self, member):
self.flock.append(member)
# ========================= next file =========================
from PySide.QtCore import *
from PySide.QtGui import *  # import all PySide GUI classes and functions
class MatrixDialog(QDialog):
def __init__(self, ValoresIniciales, parent=None):
super(MatrixDialog, self).__init__(parent)
self.setGeometry(QRect(100, 100, 600, 200))
Matrix = QTableWidget(len(ValoresIniciales[0]), len(ValoresIniciales), self)
Matrix.setGeometry(QRect(0, 0, 600, 200))
Matrix.setHorizontalHeaderLabels(['1', '2', '3', '4', '5', '6'])
Matrix.setVerticalHeaderLabels(['P [Pa]', 'T [K]', 'rho [kg/m3]', 'V [m3/kg]', 'S [J/kgK]'])
for i in range(len(ValoresIniciales)):
for j in range(len(ValoresIniciales[i])):
Matrix.setItem(j, i, QTableWidgetItem(str(ValoresIniciales[i][j])))
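# Hypothetical usage sketch (defined but not executed): show the dialog with a
# 6-state matrix of zeros. Requires a running QApplication; all values below
# are placeholders.
def _example_show_matrix_dialog():
    app = QApplication([])
    initial_values = [[0.0] * 5 for _ in range(6)]  # 6 states x 5 properties
    dialog = MatrixDialog(initial_values)
    dialog.exec_()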
# ========================= next file =========================
from abc import abstractmethod
from pathlib import Path
from titlecase import titlecase
from modules.ImageMaker import ImageMaker
class CardType(ImageMaker):
"""
This class describes an abstract card type. A CardType is a subclass of
ImageMaker, because all CardTypes are designed to create title cards. This
class outlines the requirements for creating a custom type of title card.
All subclasses of CardType must implement this class's abstract properties
and methods in order to work with the TitleCardMaker. However, not all
CardTypes need to use every argument of these methods. For example, the
StandardTitleCard utilizes most all customizations for a title card (i.e.
custom fonts, colors, sizing, season titles, etc.), while a
StarWarsTitleCard doesn't use anything except the episode title and number.
"""
"""Default case string for all title text"""
DEFAULT_FONT_CASE = 'upper'
"""Mapping of 'case' strings to format functions"""
CASE_FUNCTIONS = {
'source': str,
'upper': str.upper,
'lower': str.lower,
'title': titlecase,
'blank': lambda _: '',
}
"""Default episode text format string, can be overwritten by each class"""
EPISODE_TEXT_FORMAT = 'EPISODE {episode_number}'
"""Whether this class uses unique source images for card creation"""
USES_UNIQUE_SOURCES = True
"""Standard size for all title cards"""
TITLE_CARD_SIZE = '3200x1800'
"""Standard blur effect to apply to spoiler-free images"""
BLUR_PROFILE = '0x60'
@property
@abstractmethod
def TITLE_CHARACTERISTICS(self) -> dict:
"""
Characteristics of title splitting for this card type. Must have keys
for max_line_width, max_line_count, and top_heavy. See `Title` class
for details.
"""
raise NotImplementedError(f'All CardType objects must implement this')
@property
@abstractmethod
def ARCHIVE_NAME(self) -> str:
"""How to name archive directories for this type of card"""
raise NotImplementedError(f'All CardType objects must implement this')
@property
@abstractmethod
def TITLE_FONT(self) -> str:
"""
Standard font (full path or ImageMagick recognized font name) to use for
the episode title text
"""
raise NotImplementedError(f'All CardType objects must implement this')
@property
@abstractmethod
def TITLE_COLOR(self) -> str:
"""Standard color to use for the episode title text"""
raise NotImplementedError(f'All CardType objects must implement this')
@property
@abstractmethod
def FONT_REPLACEMENTS(self) -> dict:
"""Standard font replacements for the episode title font"""
raise NotImplementedError(f'All CardType objects must implement this')
@property
@abstractmethod
def USES_SEASON_TITLE(self) -> bool:
"""Whether this class uses season titles for the purpose of archives"""
raise NotImplementedError(f'All CardType objects must implement this')
@abstractmethod
def __init__(self) -> None:
"""
Construct a new CardType. Must call super().__init__() to initialize the
parent ImageMaker class (for PreferenceParser and ImageMagickInterface
objects).
"""
super().__init__()
@staticmethod
@abstractmethod
def is_custom_font() -> bool:
"""
Abstract method to determine whether the given font characteristics
indicate the use of a custom font or not.
:returns: True if a custom font is indicated, False otherwise.
"""
raise NotImplementedError(f'All CardType objects must implement this')
@staticmethod
@abstractmethod
def is_custom_season_titles() -> bool:
"""
Abstract method to determine whether the given season characteristics
indicate the use of a custom season title or not.
:returns: True if a custom season title is indicated, False otherwise.
"""
raise NotImplementedError(f'All CardType objects must implement this')
@abstractmethod
def create(self) -> None:
"""
Abstract method to create the title card outlined by the CardType. All
implementations of this method should delete any intermediate files.
"""
raise NotImplementedError(f'All CardType objects must implement this')
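# Hypothetical minimal subclass sketch (illustration only, never instantiated
# here): the attribute values and the no-op create() body are placeholders and
# do not correspond to any real card design in this project.
class _ExampleCardType(CardType):
    TITLE_CHARACTERISTICS = {'max_line_width': 32, 'max_line_count': 2,
                             'top_heavy': False}
    ARCHIVE_NAME = 'Example Style'
    TITLE_FONT = 'Arial'
    TITLE_COLOR = 'white'
    FONT_REPLACEMENTS = {}
    USES_SEASON_TITLE = True
    def __init__(self):
        super().__init__()
    @staticmethod
    def is_custom_font():
        return False
    @staticmethod
    def is_custom_season_titles():
        return False
    def create(self):
        # A real implementation would compose the title card (e.g. with
        # ImageMagick) and clean up any intermediate files here.
        pass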
# ========================= setup.py =========================
from setuptools import setup
setup (name = 'python_slack_client',
packages = ['python_slack_client'],
version = '0.0.1',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/Geam/python_slack_client',
       description = 'Slack client for the terminal, written in Python',
       license = 'MIT',
install_requires = ['slacksocket >= 0.7']
)
# ========================= next file =========================
from flask import session
from collections import UserDict
class SessionCredentialStore(UserDict):
def __init__(self):
super().__init__()
self.session = session
def __setitem__(self, key, value):
self.session[key] = value
def __getitem__(self, key):
return self.session[key]
def __delitem__(self, key):
return self.session.pop(key)
def __contains__(self, key):
return key in self.session
def __repr__(self):
return 'SessionStore: {}'.format(str(self.__class__))
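# Hypothetical usage sketch (defined but not executed): the store only works
# inside a Flask request context, shown here with a throwaway app and a
# placeholder secret key.
def _example_session_store():
    from flask import Flask
    app = Flask(__name__)
    app.secret_key = "dev-only-secret"  # placeholder, not a real secret
    with app.test_request_context("/"):
        store = SessionCredentialStore()
        store["token"] = "abc123"
        assert "token" in store
        del store["token"]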
# ========================= next file =========================
import sqlite3
import os
import time
import logging
import datetime
from sys import argv
from sl_signature import SL_Signature
'''
@author: jzf
@date: 2019-11-20
@desc: storage (repository) module
'''
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
#logging.basicConfig(filename='Storage.log', level=logging.DEBUG, format=LOG_FORMAT)
#fileHandler = logging.FileHandler(filename='Storage.log',encoding="utf-8")
#logging.getLogger().addHandler(fileHandler)
DBName = "Storage.db"
def TimeStampToTime(timestamp = 0):
timeStruct = time.localtime(timestamp)
return time.strftime('%Y-%m-%d %H:%M:%S',timeStruct)
class SL_Storage:
    # storage class
def __init__(self, rootdir, DBName = None):
self.logger = logging.getLogger("Storage")
fileHandler = logging.FileHandler(filename='Storage.log',encoding="utf-8")
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
self.logger.setLevel(logging.INFO)
self.rootdir = rootdir
if DBName is None :
#DBName = os.path.basename(os.path.abspath(rootdir))
#DBName += ".db"
DBName = "Storage.db"
DBName = os.path.abspath(rootdir)+os.sep+".showlib" +os.sep+ DBName
self.DBName = DBName
print("dname ="+DBName)
self.conn = sqlite3.connect(DBName)
self.cur = self.conn.cursor()
self.cur.execute('''create table IF NOT EXISTS StorageLib(
name varchar(256) not null,
hash varchar(256) not null,
size decimal(19,2) not null,
idev varchar(256) not null,
inode varchar(256) not null,
path text not null,
mtime datetime not null)
''')
self.conn.commit()
self.signature = SL_Signature(self.rootdir)
def __del__(self):
print('close')
self.conn.close()
def create_storage(self):
'''
        Create a .showlib folder and a config.xml holding the repository's
        basic information (uuid, etc.).
'''
pass
def open_storage(self):
'''
        Read the configuration from config.xml.
'''
pass
def delete_stroage(self):
'''
        Delete the repository.
'''
pass
def add_path(self):
pass
def del_path(self):
pass
def scan_path(self,root_path = None,rescan = False):
'''
        Scan the path and build the db records.
'''
RootPath = os.path.abspath(self.rootdir)
if root_path is not None:
            RootPath = os.path.abspath(root_path)
self.logger.debug(RootPath)
records = []
recordset = self.GetRecords()
for root, dirs, files in os.walk(RootPath) :
print(root)
if os.path.basename(root) == ".showlib":
continue
for name in files :
if root+os.sep+name == self.DBName:
continue
if rescan == False:
if self.IsResourceChange(root,name,recordset) != True:
continue
print(name)
record = self.GenRecord(root,name)
records.append(record)
return records
def IsResourceChange(self,dir,name,records):
rc_stat = os.stat(dir+os.sep+name)
record = None
for item in records:
if item[4] == str(rc_stat.st_ino):
record = item
if record is None:
return True
elif record[6] != TimeStampToTime(rc_stat.st_mtime):
return True
else:
return False
def InsertToDB(self,record):
        # insert a single record into the database
try:
self.conn.execute('''insert into StorageLib (name, hash, size, idev, inode, path, mtime)
values(?,?,?,?,?,?,?)''',tuple(record))
self.conn.commit()
self.logger.info(record)
except Exception as e:
self.logger.debug("SL_Storage %s path is %s" %(e,record[5]))
self.cur.close()
self.conn.close()
self.conn = sqlite3.connect(self.DBName)
self.cur = self.conn.cursor()
def InsertDB_Signature_Records(self, recordset):
if len(recordset) == 0:
return
records = []
hash_list = self.signature.GetHashList()
for item in recordset:
# name hash size
bexsit = False
for hash in hash_list:
if item[1] == hash:
bexsit = True
self.logger.info("hash exsit name=%s hash =%s" %(item[0],item[1]))
break
if bexsit == True:
continue
for record in records:
if item[1] == record[1]:
bexsit = True
self.logger.info("hash exsit name=%s hash =%s" %(item[0],item[1]))
break
if bexsit == True:
continue
records.append(tuple(item[0:3]))
self.signature.InsertDB_Records(records)
def InsertDB_Records(self, recordset):
if len(recordset) == 0:
return
records = []
for item in recordset:
records.append(tuple(item))
try:
self.conn.executemany('''insert into StorageLib (name, hash, size, idev, inode, path, mtime)
values(?,?,?,?,?,?,?)''',records)
self.conn.commit()
except Exception as e:
self.logger.warning("%s len is %s" %(e,str(len(records))))
self.cur.close()
self.conn.close()
self.conn = sqlite3.connect(self.DBName)
self.cur = self.conn.cursor()
def GenRecord(self,root,name):
        # build a single record for one file (optionally inserted into the db)
        FileAbsPath = root + os.sep + name
record = self.signature.GenRecord(root,name)
record.append(str(os.stat(FileAbsPath).st_dev))
record.append(str(os.stat(FileAbsPath).st_ino))
record.append(FileAbsPath)
record.append(TimeStampToTime(os.path.getmtime(FileAbsPath)))
return record
#self.InsertToDB(record)
def ListvedioRC(self):
        # find video resources
l = []
for row in self.conn.execute(''' SELECT * FROM StorageLib where name like '%.avi' or name like '%.MP4' or name like '%.flv' or name like '%.rmvb' or name like '%.wmv' '''):
l.append(row)
self.conn.commit()
return l
def GetRecord_ByHash(self,hash):
l = []
for row in self.conn.execute(''' SELECT * FROM StorageLib where hash = ? ''',hash):
l.append(row)
self.conn.commit()
return l
def GetRecords(self):
self.cur.execute(''' SELECT * FROM StorageLib''')
ls = self.cur.fetchall()
self.conn.commit()
return ls
def ShowRecordsCount(self):
count = None
for row in self.conn.execute(''' SELECT count(*) FROM StorageLib'''):
print(row)
count = row[0]
self.conn.commit()
return count
def GetHashList(self) :
hashlist = []
for row in self.conn.execute('SELECT distinct hash FROM StorageLib'):
hashlist.append(row[0])
self.conn.commit()
return hashlist
def GethashCount(self):
for row in self.conn.execute(''' SELECT count(distinct hash) FROM StorageLib'''):
print(row)
self.conn.commit()
def ShowRepeatHashRC(self) :
l = []
l = self.GetHashList()
sizeCount = 0
for h in l:
lh = []
lh.append(h)
ret = self.GetRecord_ByHash(lh)
if len(ret) > 1:
print('hash is %s num is %d' %(h,len(ret)))
print('=========================================')
index = 0
for item in ret:
print("FileName %s" %item[0])
print(" |__size %s" %item[2])
print(" |__Path %s" %item[5])
print('')
if index != 0 :
sizeCount += item[2]
index += 1
print('')
sizeM = sizeCount/1024.0/1024.0
sizeg = sizeM/1024.0
        print('Redundant size is approximately %d B == %d M == %d G' %(sizeCount,sizeM,sizeg))
def ShowRepeatNameRC(self) :
pass
def Get_RCPath_byHash(self, hash):
l = []
for row in self.conn.execute(''' SELECT path FROM StorageLib where hash = ? ''',hash):
l.append(row[0])
self.conn.commit()
return l
if __name__ == "__main__" :
    # Usage: xx.py <path> <command>
old = datetime.datetime.now()
print(old.strftime('%Y-%m-%d %H:%M:%S.%f'))
f = SL_Storage(argv[1])
if argv[2] == "scan":
        # Scan the folder
f.scan_path()
elif argv[2] == "ShowRepeatHashRC":
        # Show duplicates
f.ShowRepeatHashRC()
elif argv[2] == "ListvedioRC":
l = f.ListvedioRC()
print(l)
elif argv[2] == "ShowRecordsCount":
        # Show counts
f.ShowRecordsCount()
f.GethashCount()
else:
print("不能存在方法 "+argv[2]+",您可以利用当前代码自行编写脚本" )
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
print(datetime.datetime.now()-old)
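# Example invocation (illustration only; the script name and paths are hypothetical):
#   python sl_storage.py D:\media scan
#   python sl_storage.py D:\media ShowRepeatHashRC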
|
StarcoderdataPython
|
3258475
|
<filename>tick/survival/sccs/__init__.py
# License: BSD 3 clause
import tick.base
from .batch_convolutional_sccs import BatchConvSCCS
from .stream_convolutional_sccs import StreamConvSCCS
__all__ = [
"BatchConvSCCS", "StreamConvSCCS"
]
|
StarcoderdataPython
|
6475959
|
<filename>sgr_analysis/sapt/helpers.py<gh_stars>0
"""helpers.py: Functions for parsing data from SAPT0 and ALMO output
files."""
import os.path
import re
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import scipy.stats as sps
from sgr_analysis.analysis_utils import make_file_iterator
KJ_TO_KCAL = 4.184
rsq_cutoff = 0.50
re_number_str = "(-?[0-9]*\.[0-9]*)"
re_number = re.compile(re_number_str)
def fit_line(x, y):
"""Return slope, intercept of best fit line."""
# Remove entries where either x or y is NaN.
    clean_data = pd.concat([x, y], axis=1).dropna(axis=0)  # drop rows with NaN
(_, x), (_, y) = clean_data.iteritems()
slope, intercept, r, p, stderr = sps.linregress(x, y)
return slope, intercept, r**2
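# Quick illustration (not part of the original file): for a perfectly linear pair of
# Series, fit_line returns slope, intercept and r squared, e.g.
#   fit_line(pd.Series([0., 1., 2.]), pd.Series([1., 3., 5.]))  ->  (2.0, 1.0, 1.0)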
def assertFrameEqual(df1, df2, **kwds):
"""Assert that two dataframes are equal, ignoring ordering of
columns.
"""
    return assert_frame_equal(df1.sort_index(axis=1), df2.sort_index(axis=1), check_names=True, **kwds)
def linear_regression_2df(df1, df2):
"""Perform a linear regression between all pairs of columns in two
DataFrames.
"""
header = ' slope intercept rsq'
print(header)
template = '{:20.10f} {:20.10f} {:.4f} ({}, {})'
columns = df1.columns
# Are the two DataFrames identical?
try:
assertFrameEqual(df1, df2)
except AssertionError:
samedf = False
else:
samedf = True
rsq_list = []
# If the two DataFrames are the same, avoid double-counting and
# "self-regression".
if samedf:
for xi in range(len(columns)):
for yi in range(xi):
xlabel = columns[xi]
ylabel = columns[yi]
if xlabel != ylabel:
slope, intercept, rsq = fit_line(df1.loc[:, xlabel],
df2.loc[:, ylabel])
rsq_list.append((rsq, (xlabel, ylabel)))
if rsq >= rsq_cutoff:
print(template.format(slope, intercept, rsq, xlabel, ylabel))
# If the two DataFrames aren't the same, go over all columns in both.
else:
for xlabel in columns:
for ylabel in columns:
slope, intercept, rsq = fit_line(df1.loc[:, xlabel],
df2.loc[:, ylabel])
rsq_list.append((rsq, (xlabel, ylabel)))
if rsq >= rsq_cutoff:
print(template.format(slope, intercept, rsq, xlabel, ylabel))
rsq_list = sorted(rsq_list)
for entry in rsq_list:
print(entry)
return
def _add_axis_labels(pg):
"""Add labels to all possible left and bottom Axes.
This doesn't work the way I expect it to?
"""
# for ax, label in zip(self.axes[-1, :], self.x_vars):
# ax.set_xlabel(label)
# for ax, label in zip(self.axes[:, 0], self.y_vars):
# ax.set_ylabel(label)
for i, j in zip(*np.tril_indices_from(pg.axes, -1)):
ax = pg.axes[i, j]
# WHY ARE THINGS INVERTED
xlabel = pg.x_vars[j]
ylabel = pg.y_vars[i]
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
def read_qchem_eda_v1(filename: str, is_cp: bool = False):
"""Units of returned values: kcal/mol"""
almo_data_snap = dict()
with open(filename) as fh:
for line in fh:
if 'Frozen Density ( FRZ )' in line:
frz = float(line.split()[-1])
almo_data_snap['frz'] = frz
if 'Polarization ( POL )' in line:
pol = float(line.split()[-1])
almo_data_snap['pol'] = pol
if 'RS Delocalization ( P-DEL )' in line:
rs_del = float(line.split()[-1])
almo_data_snap['del_rs'] = rs_del
if 'RS Basis set superposition error ( P-BSSE )' in line:
assert is_cp
rs_bsse = float(line.split()[-1])
almo_data_snap['bsse_rs'] = rs_bsse
if 'RS Charge-transfer ( P-CT = P-DEL + P-BSSE )' in line:
assert is_cp
rs_ct = float(line.split()[-1])
assert abs(rs_del + rs_bsse - rs_ct) < 1.0e-4
almo_data_snap['ct_rs'] = rs_ct
if 'SCF Delocalization ( V-DEL )' in line:
scf_del = float(line.split()[-1])
almo_data_snap['del_scf'] = scf_del
if 'SCF Basis set superposition error ( V-BSSE )' in line:
assert is_cp
scf_bsse = float(line.split()[-1])
almo_data_snap['bsse_scf'] = scf_bsse
if 'SCF Charge-transfer ( V-CT = V-DEL + V-BSSE )' in line:
assert is_cp
scf_ct = float(line.split()[-1])
assert abs(scf_del + scf_bsse - scf_ct) < 1.0e-4
almo_data_snap['ct_scf'] = scf_ct
if 'RS Total ( P-TOT = FRZ + POL + P-DEL )' in line:
assert not is_cp
rs_tot = float(line.split()[-1])
almo_data_snap['tot_rs'] = rs_tot
if 'RS Total ( P-TOT = FRZ + POL + P-CT )' in line:
assert is_cp
rs_tot = float(line.split()[-1])
almo_data_snap['tot_rs'] = rs_tot
if 'SCF Total ( V-TOT = FRZ + POL + V-DEL )' in line:
assert not is_cp
scf_tot = float(line.split()[-1])
almo_data_snap['tot_scf'] = scf_tot
if 'SCF Total ( V-TOT = FRZ + POL + V-CT )' in line:
assert is_cp
scf_tot = float(line.split()[-1])
almo_data_snap['tot_scf'] = scf_tot
if 'Higher order relaxation ( HO = V-TOT - P-TOT )' in line:
scf_ho = float(line.split()[-1])
assert abs(scf_tot - rs_tot - scf_ho) < 1.0e-4
almo_data_snap['ho_scf'] = scf_ho
for k in almo_data_snap:
almo_data_snap[k] /= KJ_TO_KCAL
return almo_data_snap
def read_qchem_eda_v2(filename: str):
"""Units of returned values: kcal/mol"""
almo_data = dict()
fh = open(filename)
line = next(fh)
while "Results of EDA2" not in line:
line = next(fh)
line = next(fh)
assert line.strip() == "================================"
line = next(fh)
assert line.strip() == "Basic EDA Quantities"
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
assert line.strip() == "Fragment Energies (Ha):"
line = next(fh)
while line.strip() != "--------------------":
# Do nothing with the fragment energies for now
line = next(fh)
line = next(fh)
e_prp = float(line.split()[3])
almo_data["prp"] = e_prp
line = next(fh)
e_sol = float(line.split()[3])
almo_data["sol"] = e_sol
line = next(fh)
e_frz = float(line.split()[3])
almo_data["frz"] = e_frz
line = next(fh)
e_pol = float(line.split()[3])
almo_data["pol"] = e_pol
line = next(fh)
e_vct = float(line.split()[3])
almo_data["vct"] = e_vct
line = next(fh)
e_int = float(line.split()[3])
almo_data["int"] = e_int
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
assert line.strip() == ""
line = next(fh)
assert line.strip() == ""
line = next(fh)
assert line.strip() == "Decomposition of frozen interaction energy"
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
assert line.strip() == "Orthogonal Frozen Decomposition:"
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
e_elec = float(line.split()[4])
almo_data["elec"] = e_elec
line = next(fh)
e_pauli = float(line.split()[4])
almo_data["pauli"] = e_pauli
line = next(fh)
# e_disp = float(line.split()[4])
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
assert line.strip() == "Classical Frozen Decomposition:"
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
e_cls_elec = float(line.split()[5])
almo_data["cls_elec"] = e_cls_elec
line = next(fh)
e_mod_pauli = float(line.split()[5])
almo_data["mod_pauli"] = e_mod_pauli
line = next(fh)
e_disp = float(line.split()[4])
almo_data["disp"] = e_disp
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
assert line.strip() == ""
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
assert line.strip() == "Perturbative CT Analysis:"
line = next(fh)
assert line.strip() == "--------------------"
line = next(fh)
e_pct = float(line.split()[3])
almo_data["pct"] = e_pct
line = next(fh)
e_HO = float(line.split()[3])
almo_data["HO"] = e_HO
line = next(fh)
assert line.strip() == "---------------"
# TODO PCT Energy lowering
# TODO PCT Charge displacement
fh.close()
for k in almo_data:
almo_data[k] /= KJ_TO_KCAL
return almo_data
def make_snapnum_to_bin_map():
snapshots_filename = '/home/eric/Chemistry/calc.sgr/paper_02_CD_SC/inputs_freq/representative_snapshots_1qm'
snapnum_to_bin_map = dict()
with open(snapshots_filename) as fh:
for line in fh:
if line[0] == '#':
binnum = int(line.split()[2])
else:
snapnum = int(line.strip())
snapnum_to_bin_map[snapnum] = binnum
return snapnum_to_bin_map
# B3LYP/6-31G(d,p)
BIN_TO_WEIGHT_MAP = {
1: 0.06060606,
2: 0.24747475,
3: 0.4010101,
4: 0.22626263,
5: 0.06464646,
}
SAPT_HEADERS_MONOMER = [
'// Monomer Basis SAPT //',
]
SAPT_HEADERS_DIMER = [
'// SAPT0 //',
'// Dimer Basis SAPT //',
]
SAPT_HEADERS = SAPT_HEADERS_MONOMER + SAPT_HEADERS_DIMER
SAPT_BASES = ('monomer', 'dimer')
def read_psi4_sapt_section(fi, pos: int = 1, calculation_thresh: float = 1.0e-7):
"""All returned values have units of kcal/mol."""
sapt_single_basis_data = dict()
line = ''
while 'SAPT Results' not in line:
line = next(fi)
line = next(fi)
assert '------' in line
line = next(fi)
assert 'Electrostatics' in line
val_electrostatics = float(re_number.findall(line)[pos])
sapt_single_basis_data['el'] = val_electrostatics
line = next(fi)
assert 'Elst10,r' in line
line = next(fi)
assert line.strip() == ''
line = next(fi)
assert 'Exchange' in line
val_exchange = float(re_number.findall(line)[pos])
sapt_single_basis_data['exch'] = val_exchange
line = next(fi)
assert 'Exch10' in line
line = next(fi)
assert 'Exch10(S^2)' in line
line = next(fi)
assert line.strip() == ''
line = next(fi)
assert 'Induction' in line
line = next(fi)
assert 'Ind20,r' in line
val_induction = float(re_number.findall(line)[pos])
sapt_single_basis_data['ind'] = val_induction
line = next(fi)
assert 'Exch-Ind20,r' in line
val_exchange_induction = float(re_number.findall(line)[pos])
sapt_single_basis_data['exch-ind'] = val_exchange_induction
line = next(fi)
assert 'delta HF,r (2)' in line
val_induction_delta_hf = float(re_number.findall(line)[pos])
sapt_single_basis_data['ind_HO'] = val_induction_delta_hf
line = next(fi)
assert line.strip() == ''
line = next(fi)
assert 'Dispersion' in line
line = next(fi)
assert 'Disp20' in line
val_dispersion = float(re_number.findall(line)[pos])
sapt_single_basis_data['disp'] = val_dispersion
line = next(fi)
assert 'Exch-Disp20' in line
val_exchange_dispersion = float(re_number.findall(line)[pos])
sapt_single_basis_data['exch-disp'] = val_exchange_dispersion
while 'Total SAPT0' not in line:
line = next(fi)
sapt0_total_calculated = float(re_number.findall(line)[pos])
sapt0_total = val_electrostatics + \
val_exchange + \
val_induction + \
val_exchange_induction + \
val_induction_delta_hf + \
val_dispersion + \
val_exchange_dispersion
assert abs(sapt0_total - sapt0_total_calculated) < calculation_thresh
sapt_single_basis_data['total'] = sapt0_total
return sapt_single_basis_data
def read_psi4_sapt0(filename: str, pos: int = 1):
"""All returned values have units of kcal/mol."""
fi = make_file_iterator(filename)
sapt_data = dict()
# Collect both dimer-centered and monomer-centered SAPT basis
# data.
for line in fi:
# Dimer results always come before monomer results.
if any(sapt_header in line for sapt_header in SAPT_HEADERS_DIMER):
sapt_data['dimer'] = read_psi4_sapt_section(fi, pos)
if any(sapt_header in line for sapt_header in SAPT_HEADERS_MONOMER):
sapt_data['monomer'] = read_psi4_sapt_section(fi, pos)
break
# Finally, check to see if a charge transfer (CT) calculation has
# been performed.
for line in fi:
if 'SAPT Charge Transfer Analysis' in line:
line = next(fi)
assert list(set(line.strip())) == ['-']
line = next(fi)
# Asserts here comparing to induction values that were
# parsed earlier?
assert 'SAPT Induction (Dimer Basis)' in line
line = next(fi)
assert 'SAPT Induction (Monomer Basis)' in line
line = next(fi)
assert 'SAPT Charge Transfer' in line
ct = float(re_number.findall(line)[pos])
sapt_data['ct'] = ct
return sapt_data
def read_psi4_sapt0_with_snapnum_and_weight(filename):
snapnum_to_bin_map = make_snapnum_to_bin_map()
stub = os.path.splitext(os.path.basename(filename))[0]
stub_tokens = stub.split('_')
snapnum = int(stub_tokens[1])
binnum = snapnum_to_bin_map[snapnum]
sapt_data = read_psi4_sapt0(filename)
sapt_data['snapnum'] = snapnum
sapt_data['weight'] = BIN_TO_WEIGHT_MAP[binnum]
return sapt_data
def df_weighted_average(df, weights):
# Have to do things manually with Pandas, it seems.
weights = np.array(weights).reshape((len(weights), 1))
weights_sum = np.sum(weights)
return (weights * df.select_dtypes(include=[np.number])).sum(axis=0) / weights_sum
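# Usage sketch (illustration only, not part of the original module): weighted
# column averages with df_weighted_average.
if __name__ == "__main__":
    _df = pd.DataFrame({"el": [1.0, 3.0], "exch": [2.0, 4.0]})
    # expected: el -> 0.25*1 + 0.75*3 = 2.5, exch -> 0.25*2 + 0.75*4 = 3.5
    print(df_weighted_average(_df, weights=[0.25, 0.75]))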
|
StarcoderdataPython
|
37760
|
from rest_framework import serializers
class ProductSerializer(serializers.Serializer):
product = serializers.ListField(
child=serializers.CharField(max_length=200))
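# Usage sketch (illustration only; assumes a configured Django settings module):
#   s = ProductSerializer(data={"product": ["widget", "gadget"]})
#   s.is_valid()       -> True
#   s.validated_data   -> contains the cleaned list under the "product" key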
|
StarcoderdataPython
|
5157532
|
# script for running parameter scans of agent based simulation
import os.path, sys
sys.path.append('../lib/')
import numpy as np
from evolimmune import (from_tau, mus_from_str, cup_from_str,
agentbasedsim_evol, zstogrowthrate)
import cevolimmune
from misc import *
# general model parameters
lambdas = 3.0
muss = '1.0-2.0*epsilon/(1.0+epsilon), 1.0+0.8*epsilon'
cups = '0.1*pup+pup**2'
# finite population model parameters
Ls = 1
ninds = [50, 100, 1000]
aenvs = from_tau(np.logspace(np.log10(0.09), np.log10(11.0), 40, True))
pienvs = [0.3, 0.5, 0.7]
# numerical parameters
ngens = [100000]
# parameter evolution parameters
mutrates = lambda gen: 1e-2 * np.exp(-gen/1e4)
mutsizes = lambda gen: 0.25 * np.exp(-gen/1e4)
# script parameters
nbatch = 1
nruns = 50
datadir = 'data'
paramscomb = params_combination((Ls, lambdas, muss, cups, aenvs, pienvs, ninds, mutrates, mutsizes))
if parametercheck(datadir, sys.argv, paramscomb, nbatch):
ngens = np.asarray([ngens]) if isinstance(ngens, int) else np.asarray(ngens)
index = int(sys.argv[1])
njob = (index-1) % len(paramscomb)
data = []
for run in progressbar(range(nbatch * nruns)):
n = njob * nbatch + run // nruns
L, lambda_, mus, cup, aenv, pienv, nind, mutrate, mutsize = paramscomb[n]
        print(paramscomb[n])
fmus = mus_from_str(mus)
fcup = cup_from_str(cup)
def xi(ind, env, epsilon, pup):
mu1, mu2 = fmus(epsilon)
return cevolimmune.xi_pa(ind, env, lambda_, mu1, mu2) * np.exp(-fcup(pup).sum(axis=1))
# ngens is handled by running one long simulation and storing intermediate results
zs, pis, as_, pups, epsilons = agentbasedsim_evol(aenv, pienv, xi,
L=L,
nind=nind,
ngens=ngens,
mutrate=mutrate,
mutsize=mutsize,
evolvep=True,
evolveq=True,
evolvepup=True,
evolveepsilon=True
)
Lambdas = [zstogrowthrate(zs[(ngens[i-1] if i > 0 else 0):ngens[i]]) for i in range(len(ngens))]
for j, ngen in enumerate(ngens):
data.append([L, lambda_, mus, cup, aenv, pienv, nind, ngen, as_[j], pis[j], pups[j], epsilons[j], Lambdas[j]])
columns = ['L', 'lambda_', 'mus', 'cup', 'aenv', 'pienv', 'nind', 'ngen', 'a', 'pi', 'pup', 'epsilon', 'Lambda']
np.savez_compressed(datadir + 'scan_ind%g' % (index), data=data, columns=columns)
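# Example invocation (illustration only; the script name is hypothetical):
#   python run_scan.py 1
# The integer argument selects which batch of parameter combinations to simulate.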
|
StarcoderdataPython
|
5141225
|
<reponame>skaben/server_core
from alert.serializers import AlertStateSerializer
from core.models import AlertCounter, AlertState
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
API_URL = reverse('api:alertstate-list')
class TestPublicAlertApi(APITestCase):
"""Test the publicly available locks API"""
def test_login_required(self):
"""Test that login required for retrieving tags"""
res = self.client.get(API_URL)
assert res.status_code == status.HTTP_401_UNAUTHORIZED
class TestPrivateAlertStateApi(APITestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
self.client.force_authenticate(self.user)
def test_retrieve_alert_states(self):
for x in range(2):
AlertState.objects.create(name=f'test_state_{x}',
info='notimportant',
current=False
)
res = self.client.get(API_URL)
states = AlertState.objects.all()
serializer = AlertStateSerializer(states, many=True)
assert res.status_code == status.HTTP_200_OK
assert res.data == serializer.data
def test_alert_state_set_current_solitude(self):
""" Test action set_current for solitude alert state
without any other AlertState objects
"""
state = AlertState.objects.create(name='statename',
info='testinfo',
current=False)
instance_url = f'{API_URL}{str(state.id)}/set_current/'
res = self.client.post(instance_url)
patched = AlertState.objects.get(id=state.id)
assert res.status_code == status.HTTP_200_OK, res.data
assert patched.info == state.info, 'state change expected'
def test_alert_state_set_current_normal(self):
""" Test action set_current for alert state"""
old = AlertState.objects.create(name='stateold',
info='test2',
current=True)
new = AlertState.objects.create(name='statenew',
info='test',
current=False)
counter = AlertCounter.objects.create(value='100500',
comment='test')
instance_url = f'{API_URL}{str(new.id)}/set_current/'
res = self.client.post(instance_url)
new_current = AlertState.objects.get(id=new.id)
old_current = AlertState.objects.get(id=old.id)
counter_new = AlertCounter.objects.latest('id')
assert res.status_code == status.HTTP_200_OK
assert old_current.current is False, 'change to False expected'
assert new_current.current is True, 'change to True expected'
assert counter_new.id != counter.id, 'counter create expected'
def test_create_alert_state_fail(self):
""" Test create new alert state via API fails """
name = 'notastate'
payload = {'name': name,
'current': True}
res = self.client.post(API_URL, payload)
exists = AlertState.objects.filter(name=name).exists()
assert not exists
assert res.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def test_update_alert_state_fail(self):
""" Test partial update existing alert state fails """
state = AlertState.objects.create(name='statename',
info='testinfo',
current=False)
payload = {'current': True, 'name': '12345'}
instance_url = API_URL + str(state.id) + '/'
res = self.client.patch(instance_url, payload)
assert res.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def test_delete_alert_state_fail(self):
""" Test delete alert state via API fails """
state = AlertState.objects.create(name='test')
instance_url = API_URL + str(state.id) + '/'
res = self.client.delete(instance_url)
exists = AlertState.objects.filter(id=state.id).exists()
assert exists
assert res.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
|
StarcoderdataPython
|
11247728
|
<reponame>DLC01/elvis
"""
Created By <NAME>
September 2020
@parzuko
"""
import discord
from discord.ext import commands
import mysql.connector
import random
# This is not an ideal example and would ideally be a database hosted on a server
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
passwd = "<PASSWORD>",
database = "userlevels",
auth_plugin = "mysql_native_password"
)
class XPLeveling(commands.Cog):
def __init__(self, elvis):
self.elvis = elvis
def get_xp(self):
return random.randint(1,5)
def get_level(self, new_xp):
current_level = ""
if new_xp < 20:
current_level = "Aquintances"
elif new_xp >= 20 and new_xp < 60:
current_level = "Friends"
elif new_xp >= 60 and new_xp < 120:
current_level = "Best Friends"
elif new_xp >= 120 and new_xp < 240:
current_level = "Homies"
elif new_xp >= 240:
current_level = "Brothers"
return current_level
@commands.command(name="friendship", aliases=["dosti", "xp"])
async def _xp(self, ctx):
user_id = ctx.message.author.id
name = ctx.message.author.name
cursor = mydb.cursor()
try:
cursor.execute(f"SELECT friendship_level FROM xp WHERE client_id = {user_id}")
result = cursor.fetchall()
level = result[0][0]
embed = discord.Embed(
title = f"**{name} and Elvis are `{level}`.**",
color=discord.Color.teal(),
)
await ctx.send(embed=embed)
except Exception:
embed = discord.Embed(
title = f"**{name} and Elvis are meeting for the first time!**",
color=discord.Color.from_rgb(244,66,146),
)
await ctx.send(embed=embed)
@commands.Cog.listener()
async def on_message(self, message):
if message.author.bot:
return
if message.content[0] == ".":
xp = self.get_xp()
user_id = message.author.id
name = message.author.name
cursor = mydb.cursor()
cursor.execute(f"SELECT user_xp, friendship_level FROM xp WHERE client_id = {user_id}")
result = cursor.fetchall()
if len(result) == 0:
cursor.execute(f"INSERT INTO xp VALUES({user_id},{xp},'Aquintances')")
mydb.commit()
embed = discord.Embed(
title = f"**{name} and Elvis are now `Aquintances`**",
color=discord.Color.teal()
)
await message.channel.send(embed=embed)
else:
new_xp = result[0][0] + xp
current_level = result[0][1]
flag = False
new_level = self.get_level(new_xp)
if current_level != new_level:
flag = True
current_level = new_level
cursor.execute(f"UPDATE xp SET user_xp = {new_xp}, friendship_level = '{current_level}' WHERE client_id = {message.author.id}")
mydb.commit()
if flag:
embed = discord.Embed(
title = f"**{name} and Elvis are now `{current_level}`**",
color=discord.Color.teal(),
)
await message.channel.send(embed=embed)
def setup(elvis):
elvis.add_cog(XPLeveling(elvis))
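# Usage sketch (illustration only; the module path is hypothetical):
#   bot = commands.Bot(command_prefix=".")
#   bot.load_extension("cogs.xp_leveling")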
|
StarcoderdataPython
|
8152272
|
<gh_stars>0
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Mixed membership stochastic block model (Godoy-Lorite et al. 2016)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#----------------------------------------------------------------------
# PURPOSE:
# This method attempts to predict missing links within
# a network based on observed links.
#---------------------------------------------------------------------
# In our case we attempt to predict the missing link between a place
# and a visitor based on the choices users made without categorising
# them in a defined group
#-----------------------------------------------------------------------
# NOTE: [Antonia's code]
# 1) Script modified to speed up code on python
# 2) Order of vectors not the same as in Antonia's code
# 3) Some modifications are still required to run the prediction.dat bit
#-----------------------------------------------------------------------
#importing the different modules
import sys
# import numpy as np (short form)
#import _numpypy as np
import numpy as np
from math import *
import copy
import random
import csv
def read_files(training, zeros_as_null):
user_dict = {}
place_dict = {}
visits = {}
linksr = []
file = open(training,"r")
for line in file:
about = line.strip().split("\t")
if int(about[2])!=0 or not zeros_as_null:
try:
x = user_dict[about[0]][0]
user_dict[about[0]][1] += 1
except KeyError:
#x = len(user_dict)
x = int(about[0])-1
user_dict[about[0]] = [x, 1]
try:
y = place_dict[about[1]][0]
place_dict[about[1]][1] += 1
except KeyError:
#y = len(place_dict)
y = int(about[1])-1
place_dict[about[1]] = [y, 1]
try:
v = visits[about[2]]
except KeyError:
v = len(visits)
visits[about[2]] = v
linksr.append([x, y, v])
file.close()
return user_dict, place_dict, linksr, visits
def calc_likelihood(linksr, theta, eta, L, K, pr):
Like = 0.
for n, m, ra in linksr:
D = 0.
for l in range(L):
for k in range(K):
D = D+theta[n][k]*eta[m][l]*pr[ra][k][l]
for l in range(L):
for k in range(K):
Like = Like+(theta[n][k]*eta[m][l]*pr[ra][k][l])*log(D)/D
return Like
def sampling(c, ofolder, linksr, nsampling, iterations, K, L, R,
user_dict, n_users, users_denom, place_dict, n_places, places_denom,
verbose, study_likelyhood, alloutput):
if verbose:
sys.stderr.write(" ".join(['sampling',str(c),'\n']))
#theta = vector containing the different groups to which each user belongs to
theta = np.random.rand(n_users,K) / users_denom[:,np.newaxis]
#eta = vector containing the different groups to which each place belongs to
eta = np.random.rand(n_places,L) / places_denom[:,np.newaxis]
# 3d matrix containing random probabilities of ratings across user-group and place-group combos
# NOTE: I have changed the structure of this and related variables!!!
pr = np.random.rand(R, K, L)
# normalize the probabilities across ratings
# should divide by: sum of all ratings corresponding to a group-group combo
pr = pr / pr.sum(axis=0)
# create empty containers for the calculations that are made during each iteration
ntheta = np.zeros((n_users, K))
neta = np.zeros((n_places, L))
npr = np.zeros((R, K, L))
Like=[]
################################################################################
for g in range(iterations):
if verbose:
sys.stderr.write(" ".join(['iteration',str(g),'\n']))
if study_likelyhood:
Like+=[calc_likelihood(linksr, theta, eta, L, K, pr)]
# update the parameters using each observed 'rating'
for n, m, ra in linksr:
# calculate the sum of all mixtures for rating ra by
# multiplying rating probabilities rowwise by the user group
# membership probabilities and columnwise by the place group
# membership probabilities
D = (pr[ra].T * theta[n]).T * eta[m]
# normalize these values
a = D / D.sum()
# update the new (n) parameter estimates
npr[ra] = npr[ra] + a
ntheta[n] = ntheta[n] + a.sum(axis=1)
neta[m] = neta[m] + a.sum(axis=0)
# normalize the users' membership probabilities across groups
ntheta = ntheta / users_denom[:,np.newaxis]
# normalize the places' membership probabilities across groups
neta = neta / places_denom[:,np.newaxis]
# normalize the probabilities across ratings
npr = npr / npr.sum(axis=0)
# create copies of previous values and zero'd estimates as placeholders
theta = copy.deepcopy(ntheta)
eta = copy.deepcopy(neta)
pr = copy.deepcopy(npr)
# restart arrays
ntheta = ntheta*0
neta = neta*0
npr = npr*0
# calculate the likelihood given the probabilities
if study_likelyhood:
Like += [calc_likelihood(linksr, theta, eta, L, K, pr)]
else:
Like = calc_likelihood(linksr, theta, eta, L, K, pr)
if verbose:
        print(Like[-1] if study_likelyhood else Like)
#inv_user = {v[0]: k for k, v in user_dict.iteritems()}
#id_user = np.asarray([int(inv_user[x]) for x in np.sort(np.asarray(inv_user.keys()))])
#inv_place = {v[0]: k for k, v in place_dict.iteritems()}
#id_place = np.asarray([int(inv_place[x]) for x in np.sort(np.asarray(inv_place.keys()))])
#theta_=theta[id_user,:]
#eta_=eta[id_place,:]
if alloutput:
np.save(ofolder + "_".join([str(c), str(K), str(L)]) + "_theta", theta)
np.save(ofolder + "_".join([str(c), str(K), str(L)]) + "_eta", eta)
np.save(ofolder + "_".join([str(c), str(K), str(L)]) + "_pr", pr)
np.save(ofolder + "_".join([str(c), str(K), str(L)]) + "_like", np.asarray(Like))
return Like
def run_sampling(training,
ofolder="../../results/test/",
K=10,
L=10,
nsampling=0,
iterations=200,
zeros_as_null=False,
verbose=False,
study_likelyhood=True,
alloutput=True):
if not zeros_as_null:
sys.stderr.write("\nCareful! Zeros in your data will represent a type of interaction and will be used in the sampling.\n\n")
user_dict, place_dict, linksr, visits = read_files(training, zeros_as_null)
n_users = len(user_dict)
n_places = len(place_dict)
R = len(visits)
    users_denom = np.asarray(list(user_dict.values()))
users_denom = users_denom[users_denom[:,0].argsort(),1]
    places_denom = np.asarray(list(place_dict.values()))
places_denom = places_denom[places_denom[:,0].argsort(),1]
lkl = sampling(nsampling, ofolder, linksr, nsampling, iterations, K, L, R, user_dict, n_users, users_denom, place_dict, n_places, places_denom, verbose, study_likelyhood, alloutput)
return lkl
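# Usage sketch (assumption, not part of the original script): "training.dat" is a
# hypothetical tab-separated "user<TAB>place<TAB>rating" file; adjust before running.
if __name__ == "__main__":
    import os.path
    if os.path.exists("training.dat"):
        run_sampling("training.dat", ofolder="results/", K=5, L=5,
                     iterations=50, verbose=True, alloutput=False)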
|
StarcoderdataPython
|
5087146
|
# pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from .. import qtutil
from ...external.qt import QtGui
from ...external.qt.QtCore import Qt
from mock import MagicMock, patch
from ..qtutil import GlueDataDialog
from ..qtutil import pretty_number, GlueComboBox
from glue.config import data_factory
from glue.core import Subset
def test_glue_action_button():
a = QtGui.QAction(None)
a.setToolTip("testtooltip")
a.setWhatsThis("testwhatsthis")
a.setIcon(QtGui.QIcon("dummy_file"))
a.setText('testtext')
b = qtutil.GlueActionButton()
b.set_action(a)
# assert b.icon() == a.icon() icons are copied, apparently
assert b.text() == a.text()
assert b.toolTip() == a.toolTip()
assert b.whatsThis() == a.whatsThis()
#stays in sync
a.setText('test2')
assert b.text() == 'test2'
@data_factory('testing_factory', '*.*')
def dummy_factory(filename):
from glue.core import Data
result = Data()
result.made_with_dummy_factory = True
return result
dummy_factory_member = [f for f in data_factory.members
if f[0] is dummy_factory][0]
class TestGlueDataDialog(object):
def test_factory(self):
"""Factory method should always match with filter"""
fd = GlueDataDialog()
assert len(fd.filters) > 0
for k, v in fd.filters:
fd._fd.setFilter(v)
assert fd.factory() is k
def test_load_data_cancel(self):
"""Return None if user cancels operation"""
fd = GlueDataDialog()
mock_file_exec(fd, cancel=True)
assert fd.load_data() == []
def test_load_data_normal(self):
"""normal load_data dispatches path to factory"""
fd = GlueDataDialog()
mock_file_exec(fd, cancel=False, path='ld_data_nrml',
factory=dummy_factory_member)
d = fd.load_data()
assert len(d) == 1
d = d[0]
assert d.label == 'ld_data_nrml'
assert d.made_with_dummy_factory is True
def test_filters(self):
"""Should build filter list from data_factories env var"""
fd = GlueDataDialog()
assert len(fd.filters) == len(data_factory.members)
def test_load_multiple(self):
fd = GlueDataDialog()
mock_file_exec(fd, cancel=False, path=['a.fits', 'b.fits'],
factory=dummy_factory_member)
ds = fd.load_data()
assert len(ds) == 2
for d, label in zip(ds, 'ab'):
assert d.label == label
assert d.made_with_dummy_factory is True
def mock_file_exec(fd, cancel=False, path='junk',
factory=dummy_factory_member):
if not isinstance(path, list):
path = [path]
fd._fd.exec_ = MagicMock()
fd._fd.exec_.return_value = 1 - cancel
fd.factory = MagicMock()
fd.factory.return_value = factory
fd.paths = MagicMock()
fd.paths.return_value = path
def test_data_wizard_cancel():
"""Returns empty list if user cancel's dialog"""
with patch('glue.qt.qtutil.GlueDataDialog') as mock:
mock().load_data.return_value = []
assert qtutil.data_wizard() == []
def test_data_wizard_normal():
"""Returns data list if successful"""
with patch('glue.qt.qtutil.GlueDataDialog') as mock:
mock().load_data.return_value = [1]
assert qtutil.data_wizard() == [1]
def test_data_wizard_error_cancel():
"""Returns empty list of error generated and then canceled"""
with patch('glue.qt.qtutil.GlueDataDialog') as mock:
mock().load_data.side_effect = Exception
with patch('glue.qt.qtutil.QMessageBox') as qmb:
qmb().exec_.return_value = 0
assert qtutil.data_wizard() == []
class TestPrettyNumber(object):
def test_single(self):
assert pretty_number([1]) == ['1']
assert pretty_number([0]) == ['0']
assert pretty_number([-1]) == ['-1']
assert pretty_number([1.0001]) == ['1']
assert pretty_number([1.01]) == ['1.01']
assert pretty_number([1e-5]) == ['1.000e-05']
assert pretty_number([1e5]) == ['1.000e+05']
assert pretty_number([3.3]) == ['3.3']
def test_list(self):
assert pretty_number([1, 2, 3.3, 1e5]) == ['1', '2', '3.3',
'1.000e+05']
class TestGlueComboBox(object):
def setup_method(self, method):
self.combo = GlueComboBox()
def test_add_data(self):
self.combo.addItem('hi', userData=3)
assert self.combo.itemData(0) == 3
def test_add_multi_data(self):
self.combo.addItem('hi', userData=3)
self.combo.addItem('ho', userData=4)
assert self.combo.itemData(0) == 3
assert self.combo.itemData(1) == 4
def test_replace(self):
self.combo.addItem('hi', userData=3)
self.combo.removeItem(0)
self.combo.addItem('ho', userData=4)
assert self.combo.itemData(0) == 4
def test_clear(self):
self.combo.addItem('a', 1)
self.combo.addItem('b', 2)
self.combo.addItem('c', 3)
self.combo.clear()
self.combo.addItem('d', 4)
assert self.combo.itemData(0) == 4
def test_mid_remove(self):
self.combo.addItem('a', 1)
self.combo.addItem('b', 2)
self.combo.addItem('c', 3)
self.combo.removeItem(1)
assert self.combo.itemData(1) == 3
def test_set_item_data(self):
self.combo.addItem('a', 1)
self.combo.setItemData(0, 2)
assert self.combo.itemData(0) == 2
def test_default_data(self):
self.combo.addItem('a')
assert self.combo.itemData(0) is None
def test_add_items(self):
self.combo.addItem('a', 1)
self.combo.addItems(['b', 'c', 'd'])
assert self.combo.itemData(0) == 1
assert self.combo.itemData(1) is None
assert self.combo.itemData(2) is None
assert self.combo.itemData(3) is None
def test_non_user_role(self):
"""methods that edit data other than userRole dispatched to super"""
self.combo.addItem('a', 1)
assert self.combo.itemData(0, role=Qt.DisplayRole) == 'a'
self.combo.setItemData(0, 'b', role=Qt.DisplayRole)
assert self.combo.itemData(0, role=Qt.DisplayRole) == 'b'
def test_consistent_with_signals(self):
"""Ensure that when signal/slot connections interrupt
methods mid-call, internal data state is consistent"""
# Qt swallows exceptions in signals, so we can't assert in this
# instead, store state and assert after signal
good = [False]
def assert_consistent(*args):
good[0] = len(self.combo._data) == self.combo.count()
# addItem
self.combo.currentIndexChanged.connect(assert_consistent)
self.combo.addItem('a', 1)
assert good[0]
# addItems
self.combo.clear()
good[0] = False
self.combo.addItems('b c d'.split())
assert good[0]
# removeItem
self.combo.clear()
self.combo.addItem('a', 1)
good[0] = False
self.combo.removeItem(0)
assert good[0]
def test_qt4_to_mpl_color():
assert qtutil.qt4_to_mpl_color(QtGui.QColor(255, 0, 0)) == '#ff0000'
assert qtutil.qt4_to_mpl_color(QtGui.QColor(255, 255, 255)) == '#ffffff'
def test_edit_color():
with patch('glue.qt.qtutil.QColorDialog') as d:
d.getColor.return_value = QtGui.QColor(0, 1, 0)
d.isValid.return_value = True
s = Subset(None)
qtutil.edit_layer_color(s)
assert s.style.color == '#000100'
def test_edit_color_cancel():
with patch('glue.qt.qtutil.QColorDialog') as d:
d.getColor.return_value = QtGui.QColor(0, -1, 0)
s = Subset(None)
qtutil.edit_layer_color(s)
def test_edit_symbol():
with patch('glue.qt.qtutil.QInputDialog') as d:
d.getItem.return_value = ('*', True)
s = Subset(None)
qtutil.edit_layer_symbol(s)
assert s.style.marker == '*'
def test_edit_symbol_cancel():
with patch('glue.qt.qtutil.QInputDialog') as d:
d.getItem.return_value = ('*', False)
s = Subset(None)
qtutil.edit_layer_symbol(s)
assert s.style.marker != '*'
def test_edit_point_size():
with patch('glue.qt.qtutil.QInputDialog') as d:
d.getInt.return_value = 123, True
s = Subset(None)
qtutil.edit_layer_point_size(s)
assert s.style.markersize == 123
def test_edit_point_size_cancel():
with patch('glue.qt.qtutil.QInputDialog') as d:
d.getInt.return_value = 123, False
s = Subset(None)
qtutil.edit_layer_point_size(s)
assert s.style.markersize != 123
def test_edit_layer_label():
with patch('glue.qt.qtutil.QInputDialog') as d:
d.getText.return_value = ('accepted label', True)
s = Subset(None)
qtutil.edit_layer_label(s)
assert s.label == 'accepted label'
def test_edit_layer_label_cancel():
with patch('glue.qt.qtutil.QInputDialog') as d:
d.getText.return_value = ('rejected label', False)
s = Subset(None)
qtutil.edit_layer_label(s)
assert s.label != 'rejected label'
def test_pick_item():
items = ['a', 'b', 'c']
labels = ['1', '2', '3']
with patch('glue.qt.qtutil.QInputDialog') as d:
d.getItem.return_value = '1', True
assert qtutil.pick_item(items, labels) == 'a'
d.getItem.return_value = '2', True
assert qtutil.pick_item(items, labels) == 'b'
d.getItem.return_value = '3', True
assert qtutil.pick_item(items, labels) == 'c'
d.getItem.return_value = '3', False
assert qtutil.pick_item(items, labels) is None
def test_pick_class():
class Foo:
pass
class Bar:
pass
Bar.LABEL = 'Baz'
with patch('glue.qt.qtutil.pick_item') as d:
qtutil.pick_class([Foo, Bar])
d.assert_called_once_with([Foo, Bar], ['Foo', 'Baz'])
def test_get_text():
with patch('glue.qt.qtutil.QInputDialog') as d:
d.getText.return_value = 'abc', True
assert qtutil.get_text() == 'abc'
d.getText.return_value = 'abc', False
assert qtutil.get_text() is None
class TestGlueListWidget(object):
def setup_method(self, method):
self.w = qtutil.GlueListWidget()
def test_mime_type(self):
assert self.w.mimeTypes() == [qtutil.LAYERS_MIME_TYPE]
def test_mime_data(self):
self.w.set_data(3, 'test data')
self.w.set_data(4, 'do not pick')
mime = self.w.mimeData([3])
        assert mime.data(qtutil.LAYERS_MIME_TYPE) == ['test data']
def test_mime_data_multiselect(self):
self.w.set_data(3, 'test data')
self.w.set_data(4, 'also pick')
mime = self.w.mimeData([3, 4])
        assert mime.data(qtutil.LAYERS_MIME_TYPE) == ['test data', 'also pick']
class TestRGBEdit(object):
def setup_method(self, method):
from glue.clients.layer_artist import RGBImageLayerArtist
from glue.core import Data
d = Data()
self.artist = RGBImageLayerArtist(d, None)
self.w = qtutil.RGBEdit(artist=self.artist)
def test_update_visible(self):
for color in ['red', 'green', 'blue']:
state = self.artist.layer_visible[color]
self.w.vis[color].click()
assert self.artist.layer_visible[color] != state
def test_update_current(self):
for color in ['red', 'green', 'blue']:
self.w.current[color].click()
assert self.artist.contrast_layer == color
|
StarcoderdataPython
|
5002850
|
from setuptools import setup
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="rwafaker",
version='2.0.6',
url="https://github.com/knowbee/py-rwafaker.git",
description="This package generates massive amounts of realistic fake data in Rwanda native language (Ikinyarwanda)",
long_description=long_description,
long_description_content_type="text/markdown",
py_modules=["rwafake"],
package_dir={'': 'rwafake'},
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules"
],
keywords='hi',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
zip_safe=False,
)
|
StarcoderdataPython
|
3379416
|
"""Desenvolva um programa que pergunte a distância de uma viagem em Km.
Calcule o preço da passagem, cobrando R$0,50 por Km para viagens de até 200Km e R$0,45 parta viagens mais longas."""
distancia = float(input('Qual a distância da viagem? Km'))
if distancia <= 200:
    print('The ticket price will be: R${:.2f}'.format(distancia*0.5))
else:
    print('The ticket price will be: R${:.2f}'.format(distancia*0.45))
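# Illustration (not part of the original exercise): 150 km -> 150*0.50 = R$75.00; 250 km -> 250*0.45 = R$112.50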
|
StarcoderdataPython
|
11335384
|
import cv2
from albumentations import Compose, PadIfNeeded, LongestMaxSize
class InferenceTransformation:
def __init__(self, width, height):
self.aug = Compose([
LongestMaxSize(max_size=width if width > height else height),
PadIfNeeded(min_height=height, min_width=width, border_mode=cv2.BORDER_CONSTANT)
])
def __call__(self, image):
transformed = self.aug(image=image)
return transformed["image"]
|
StarcoderdataPython
|
35532
|
<gh_stars>0
import numpy as np
import pickle
import tqdm
import os
import torch
from prompts.prompt_material import DETS_LIST, CONTENT_STRUCTS_PREFIX_LIST, CONTENT_STRUCTS_MIDDLE_LIST, CONTENT_STRUCTS_SUFFIX_LIST, TRANSFORMATIONS, LOGICAL_PREFIXES_LIST, LOGICAL_STRUCTS_LW_LIST
#######################################
# #
# CONTENT #
# #
#######################################
class ContentPromptScorer:
def __init__(self, model = None, tokenizer = None, device = None, dataset_name = ''):
# Model used to compute scores
self.model = model
self.tokenizer = tokenizer
self.device = device
# Load prompts materials
self.dets_list = DETS_LIST
self.structs_dict = {'prefix': CONTENT_STRUCTS_PREFIX_LIST,
'middle': CONTENT_STRUCTS_MIDDLE_LIST,
'suffix': CONTENT_STRUCTS_SUFFIX_LIST}
# Load transformations names
self.transformations_names = TRANSFORMATIONS
# Define template
self.vanilla_template = '<PREFIX> <DET1> <WORD1> <MIDDLE> <DET2> <WORD2> <SUFFIX>.'
self.key_template = '<det1>-<det2>-<prefix>-<middle>-<suffix>'
# Compute keys
self._compute_keys()
# Where to save data
self.filename = 'prompts\\scores\\content_prompts_scores_{}'.format(dataset_name)
# Compute Prompts
self.create_prompts()
def _compute_keys(self):
"""
Compute all the possible keys in the form idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
"""
N_dets = len(self.dets_list)
N_prefix = len(self.structs_dict['prefix'])
N_middle = len(self.structs_dict['middle'])
N_suffix = len(self.structs_dict['suffix'])
list_of_keys = []
for idx_det1 in range(N_dets):
for idx_det2 in range(N_dets):
for idx_prefix in range(N_prefix):
for idx_middle in range(N_middle):
for idx_suffix in range(N_suffix):
key = self.key_template.replace('<det1>', str(idx_det1)).replace('<det2>', str(idx_det2))
key = key.replace('<prefix>', str(idx_prefix)).replace('<middle>', str(idx_middle)).replace('<suffix>', str(idx_suffix))
list_of_keys.append(key)
self.list_of_keys = list_of_keys
def _from_key_to_words(self, key):
"""
Expect a key of the form idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
"""
list_of_idx = [int(idx) for idx in key.split('-')]
det1 = self.dets_list[list_of_idx[0]]
det2 = self.dets_list[list_of_idx[1]]
prefix = self.structs_dict['prefix'][list_of_idx[2]]
middle = self.structs_dict['middle'][list_of_idx[3]]
suffix = self.structs_dict['suffix'][list_of_idx[4]]
return [det1, det2, prefix, middle, suffix]
def _create_prompt(self, dets, structs):
det1, det2 = dets
prefix, middle, suffix = structs
sentence = self.vanilla_template.replace('<DET1>', det1).replace('<DET2>', det2)
sentence = sentence.replace('<PREFIX>', prefix).replace('<MIDDLE>', middle).replace('<SUFFIX>', suffix)
return sentence
def create_prompts(self):
"""
Returns : keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
value sentence
"""
dict_of_prompts = {}
for key in self.list_of_keys:
words_from_keys = self._from_key_to_words(key)
dets, structs = words_from_keys[0:2], words_from_keys[2:5]
sentence = self._create_prompt(dets, structs)
dict_of_prompts[key] = sentence
self.dict_of_prompts = dict_of_prompts
def compute_all_pairs_scores(self, list_of_words):
"""
expect words = list of pairs [HYPONYM, NOUN]
returns : dict -> key "HYPONYM---NOUN"
value dict -> key transf
value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
value [score_mask1, score_mask2]
"""
# Compute Prompts Scores
if os.path.exists(self.filename): # Previous save
savefile = open(self.filename, 'rb')
all_pairs_scores_dict = pickle.load(savefile)
savefile.close()
else:
all_pairs_scores_dict = {}
num_treated = 0
for words in tqdm.tqdm(list_of_words, total = len(list_of_words)):
word1, word2 = words
key = word1 + '---' + word2
if key in all_pairs_scores_dict.keys(): #If we have already computed this key go to the next
continue
scores_dict = self.batch_compute_one_pair_scores(words)
all_pairs_scores_dict[key] = scores_dict
num_treated += 1
if num_treated % 20000 == 0: #Save from time to time
savefile = open(self.filename, 'wb')
pickle.dump(all_pairs_scores_dict, savefile)
savefile.close()
self.all_pairs_scores_dict = all_pairs_scores_dict
# Save scores
savefile = open(self.filename, 'wb')
pickle.dump(all_pairs_scores_dict, savefile)
savefile.close()
def compute_one_pair_scores(self, words):
"""
expect words = [HYPONYM, NOUN]
returns : dict -> key transf
value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
value [score_mask1, score_mask2]
"""
# Tokenize the words to know the number of masks to add
word1, word2 = words
masked_token_ids_1 = self.tokenizer(word1)['input_ids'][1:-1]
masked_token_ids_2 = self.tokenizer(word2)['input_ids'][1:-1]
N_masks_1 = len(masked_token_ids_1)
N_masks_2 = len(masked_token_ids_2)
# Construct sentences
scores_dict = {}
for transf in self.transformations_names:
transf_score_dict = {}
for key in self.list_of_keys:
vanilla_sentence = self.dict_of_prompts[key]
sentence, mask1_rank, mask2_rank = self.phi(vanilla_sentence, transf, N_masks_1, N_masks_2)
# Compute input_ids and attention_mask of the sentence
encoding = self.tokenizer(sentence,
return_tensors='pt'
)
input_ids = encoding['input_ids'].to(self.device)
attention_mask = encoding['attention_mask'].to(self.device)
# The model needs the masks_to_predict_pos
masks_to_predict_pos = self.find_masks_pos(input_ids)
score_mask1 = self._compute_model_score(input_ids, attention_mask, masked_token_ids_1, masks_to_predict_pos[mask1_rank - 1])
score_mask2 = self._compute_model_score(input_ids, attention_mask, masked_token_ids_2, masks_to_predict_pos[mask2_rank - 1])
transf_score_dict[key] = [score_mask1, score_mask2]
scores_dict[transf] = transf_score_dict
return scores_dict
def _compute_model_score(self, input_ids, attention_mask, masked_token_ids, masks_to_predict_pos):
# Compute the probabilities and ranks from the model
with torch.no_grad():
probs_n_ranks = self.model.compute_greedy(input_ids, attention_mask, masks_to_predict_pos, masked_token_ids)
# Compute scores
score = probs_n_ranks[:,0].prod()
return score
def batch_compute_one_pair_scores(self, words):
"""
expect words = [HYPONYM, NOUN]
returns : dict -> key transf
value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
value [score_mask1, score_mask2]
"""
# Tokenize the words to know the number of masks to add
word1, word2 = words
masked_token_ids_1 = self.tokenizer(word1, return_tensors='pt')['input_ids'][:,1:-1].repeat(len(self.list_of_keys),1).to(self.device)
masked_token_ids_2 = self.tokenizer(word2, return_tensors='pt')['input_ids'][:,1:-1].repeat(len(self.list_of_keys),1).to(self.device)
N_masks_1 = masked_token_ids_1.shape[1]
N_masks_2 = masked_token_ids_2.shape[1]
# Construct sentences
scores_dict = {}
for transf in self.transformations_names:
transf_score_dict = {}
sentences = []
mask1_ranks, mask2_ranks = [], []
for key in self.list_of_keys:
vanilla_sentence = self.dict_of_prompts[key]
sentence, mask1_rank, mask2_rank = self.phi(vanilla_sentence, transf, N_masks_1, N_masks_2)
sentences.append(sentence)
mask1_ranks.append(mask1_rank)
mask2_ranks.append(mask2_rank)
# Compute input_ids and attention_mask of the sentence
encoding = self.tokenizer(sentences,
padding = True,
return_tensors='pt'
)
input_ids = encoding['input_ids'].to(self.device)
attention_mask = encoding['attention_mask'].to(self.device)
# The model needs the masks_to_predict_pos
masks_to_predict_pos = self.batch_find_masks_pos(input_ids) # We suppose this is ok
scores_mask1 = self._batch_compute_model_score(input_ids, attention_mask, masked_token_ids_1, self.helper(masks_to_predict_pos, mask1_ranks).to(self.device))
scores_mask2 = self._batch_compute_model_score(input_ids, attention_mask, masked_token_ids_2, self.helper(masks_to_predict_pos, mask2_ranks).to(self.device))
for idx in range(len(self.list_of_keys)):
key = self.list_of_keys[idx]
transf_score_dict[key] = [scores_mask1[idx].item(), scores_mask2[idx].item()]
scores_dict[transf] = transf_score_dict
return scores_dict
def _batch_compute_model_score(self, input_ids, attention_mask, masked_token_ids, masks_to_predict_pos):
# Compute the probabilities and ranks from the model
with torch.no_grad():
probs = self.model.batch_compute_greedy(input_ids, attention_mask, masks_to_predict_pos, masked_token_ids)
# Compute scores
scores = probs.prod(dim=1) # shape [batch_size = len(self.list_of_keys)]
return scores
def batch_find_masks_pos(self, ids_seq):
masks_pos = torch.where(ids_seq == 103)[1]
pos_clusters = []
cluster = []
for k in range(masks_pos.shape[0]):
cluster.append(masks_pos[k])
if (k < len(masks_pos) -1) and (masks_pos[k] + 1 != masks_pos[k + 1]): #The next mask pos does not follow the previous one
pos_clusters.append(torch.LongTensor(cluster))
cluster = []
pos_clusters.append(torch.LongTensor(cluster))
return pos_clusters
def helper(self, list_of_tensors, mask_rank):
batch_size = len(self.list_of_keys)
mask_pos = []
for k in range(batch_size):
mask_pos.append(list_of_tensors[2*k:2*k+2][mask_rank[k] - 1])
return torch.cat(mask_pos)
def find_masks_pos(self, ids_seq):
"""
Compute all mask_token positions in the sequence, then divide it into clusters (following sequence) and returns the mask_rank^th cluster.
"""
def find_all_masks_pos(ids_seq):
pos = []
for k in range(ids_seq.shape[1]):
if ids_seq[0][k] == 103:
pos.append(k)
return pos
all_masks_pos = find_all_masks_pos(ids_seq)
pos_clusters = []
cluster = []
for k in range(len(all_masks_pos)):
cluster.append(all_masks_pos[k])
if (k < len(all_masks_pos) -1) and (all_masks_pos[k] + 1 != all_masks_pos[k + 1]): #The next mask pos does not follow the previous one
pos_clusters.append(cluster)
cluster = []
pos_clusters.append(cluster)
return pos_clusters
def phi(self, vanilla_sentence, transf, N_masks_1, N_masks_2):
"""
Take a sentence s and returns phi(s) and the rank of mask1 (cf. google doc.)
The template vanilla is something like : "MASK1 is MASK2" thus MASK1 is rank 1 and MASK2 is rank 2
Whereas for the transformation opposite : "MASK2 is MASK1" thus MASK1 is rank 2 and MASK2 is rank 1
"""
if transf == 'vanilla':
sentence = vanilla_sentence.replace('<WORD1>', N_masks_1*self.tokenizer.mask_token).replace('<WORD2>', N_masks_2*self.tokenizer.mask_token)
mask1_rank, mask2_rank = 1, 2
elif transf == 'opposite':
sentence = vanilla_sentence.replace('<WORD1>', N_masks_2*self.tokenizer.mask_token).replace('<WORD2>', N_masks_1*self.tokenizer.mask_token)
mask1_rank, mask2_rank = 2, 1
elif transf == 'reverse':
sentence = vanilla_sentence.replace('<WORD1>', N_masks_2*self.tokenizer.mask_token).replace('<WORD2>', N_masks_1*self.tokenizer.mask_token)
mask1_rank, mask2_rank = 2, 1
return sentence, mask1_rank, mask2_rank
#######################################
# #
# LOGICAL #
# #
#######################################
class LogicalPromptScorer:
def __init__(self, model = None, tokenizer = None, device = None, dataset_name = ''):
# Model used to compute scores
self.model = model
self.tokenizer = tokenizer
self.device = device
# Load prompts materials
self.dets_list = DETS_LIST
self.structs_dict = {'prefixes': LOGICAL_PREFIXES_LIST,
'struct_lw': LOGICAL_STRUCTS_LW_LIST}
# Define template
self.vanilla_template = '<PREFIX1> <DET1> <WORD1> <STRUCT_LW> <LW> <PREFIX2> <DET2> <WORD2>.'
self.key_template = '<det1>-<det2>-<prefixes>-<struct_lw>'
# Compute keys
self._compute_keys()
# Where to save data
self.filename = 'prompts\\scores\\logical_prompts_scores_{}'.format(dataset_name)
# Compute Prompts
self.create_prompts()
def _compute_keys(self):
"""
Compute all the possible keys in the form idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
"""
N_dets = len(self.dets_list)
N_prefixes = len(self.structs_dict['prefixes'])
N_struct_lw = len(self.structs_dict['struct_lw'])
list_of_keys = []
for idx_det1 in range(N_dets):
for idx_det2 in range(N_dets):
for idx_prefixes in range(N_prefixes):
for idx_struct_lw in range(N_struct_lw):
key = self.key_template.replace('<det1>', str(idx_det1)).replace('<det2>', str(idx_det2))
key = key.replace('<prefixes>', str(idx_prefixes)).replace('<struct_lw>', str(idx_struct_lw))
list_of_keys.append(key)
self.list_of_keys = list_of_keys
def _from_key_to_words(self, key):
"""
        Expect a key of the form idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
"""
list_of_idx = [int(idx) for idx in key.split('-')]
det1 = self.dets_list[list_of_idx[0]]
det2 = self.dets_list[list_of_idx[1]]
prefixes = self.structs_dict['prefixes'][list_of_idx[2]]
struct_lw = self.structs_dict['struct_lw'][list_of_idx[3]]
return [det1, det2, prefixes, struct_lw]
def _create_prompt(self, dets, prefixes, struct_lw):
det1, det2 = dets
prefix1, prefix2 = prefixes
# Sentence in the right order "This is a seagull, therefore it is a bird."
sentence = self.vanilla_template.replace('<DET1>', det1).replace('<DET2>', det2)
sentence = sentence.replace('<PREFIX1>', prefix1).replace('<PREFIX2>', prefix2).replace('<STRUCT_LW>', struct_lw)
# Sentence in the reverse order "It is a bird, therefore this is a seagull."
sentence_reverse = self.vanilla_template.replace('<DET1>', det2).replace('<DET2>', det1)
sentence_reverse = sentence_reverse.replace('<PREFIX1>', prefix2).replace('<PREFIX2>', prefix1).replace('<STRUCT_LW>', struct_lw)
return sentence, sentence_reverse
def create_prompts(self):
"""
Returns : keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
value [sentence, sentence_reverse]
"""
dict_of_prompts = {}
for key in self.list_of_keys:
words_from_keys = self._from_key_to_words(key)
dets, prefixes, struct_lw = words_from_keys[0:2], words_from_keys[2], words_from_keys[3]
sentence, sentence_reverse = self._create_prompt(dets, prefixes, struct_lw)
dict_of_prompts[key] = [sentence, sentence_reverse]
self.dict_of_prompts = dict_of_prompts
def compute_all_pairs_scores(self, logical_words, list_of_words):
"""
expect words = list of pairs [HYPONYM, NOUN]
returns : dict -> key "HYPONYM---NOUN"
value dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
"""
# Tokenize the logical words
logical_words_ids = []
for lw in logical_words:
input_ids = self.tokenizer(lw)['input_ids'][1:-1]
assert len(input_ids) == 1 # We only keep logical words mapped to a single token
logical_words_ids.append(input_ids[0])
# Compute Prompts Scores
if os.path.exists(self.filename): # Previous save
savefile = open(self.filename, 'rb')
all_pairs_scores_dict = pickle.load(savefile)
savefile.close()
else:
all_pairs_scores_dict = {}
num_treated = 0
for words in tqdm.tqdm(list_of_words, total = len(list_of_words)):
word1, word2 = words
key = word1 + '---' + word2
if key in all_pairs_scores_dict.keys(): # If we have already computed this key go to the next
continue
scores_dict = self.batch_compute_one_pair_scores(logical_words_ids, words)
all_pairs_scores_dict[key] = scores_dict
num_treated += 1
if num_treated % 20000 == 0: # Save from time to time
savefile = open(self.filename, 'wb')
pickle.dump(all_pairs_scores_dict, savefile)
savefile.close()
self.all_pairs_scores_dict = all_pairs_scores_dict
# Save scores
savefile = open(self.filename, 'wb')
pickle.dump(all_pairs_scores_dict, savefile)
savefile.close()
def compute_one_pair_scores(self, logical_words_ids, words):
"""
expect words = [HYPONYM, NOUN]
returns : dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
"""
word1, word2 = words
# Construct sentences
scores_dict = {}
for key in self.list_of_keys:
sentence, sentence_reverse = self.dict_of_prompts[key]
sentence = sentence.replace('<WORD1>', word1).replace('<WORD2>', word2).replace('<LW>', self.tokenizer.mask_token)
sentence_reverse = sentence_reverse.replace('<WORD1>', word2).replace('<WORD2>', word1).replace('<LW>', self.tokenizer.mask_token)
# Compute scores for sentence
encoding = self.tokenizer(sentence,
return_tensors='pt'
)
input_ids = encoding['input_ids'].to(self.device)
attention_mask = encoding['attention_mask'].to(self.device)
mask_pos = self.find_mask_pos(input_ids)
scores = self._compute_model_score(input_ids, attention_mask, logical_words_ids, mask_pos)
# Compute scores for sentence_reverse
encoding_reverse = self.tokenizer(sentence_reverse,
return_tensors='pt'
)
input_ids_reverse = encoding_reverse['input_ids'].to(self.device)
attention_mask_reverse = encoding_reverse['attention_mask'].to(self.device)
mask_pos_reverse = self.find_mask_pos(input_ids_reverse)
scores_reverse = self._compute_model_score(input_ids_reverse, attention_mask_reverse, logical_words_ids, mask_pos_reverse)
scores_dict[key] = [scores, scores_reverse]
return scores_dict
def batch_compute_one_pair_scores(self, logical_words_ids, words):
"""
expect words = [HYPONYM, NOUN]
returns : dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
"""
word1, word2 = words
# Construct sentences
scores_dict = {}
sentences = []
for key in self.list_of_keys:
sentence, sentence_reverse = self.dict_of_prompts[key]
sentence = sentence.replace('<WORD1>', word1).replace('<WORD2>', word2).replace('<LW>', self.tokenizer.mask_token)
sentence_reverse = sentence_reverse.replace('<WORD1>', word2).replace('<WORD2>', word1).replace('<LW>', self.tokenizer.mask_token)
sentences.append(sentence)
sentences.append(sentence_reverse)
# Compute scores for sentence
encoding = self.tokenizer(sentences,
padding = True,
return_tensors='pt')
input_ids = encoding['input_ids'].to(self.device)
attention_mask = encoding['attention_mask'].to(self.device)
mask_pos = self.find_mask_pos(input_ids)
scores = self._batch_compute_model_score(input_ids, attention_mask, logical_words_ids, mask_pos)
for k in range(len(self.list_of_keys)):
key = self.list_of_keys[k]
scores_dict[key] = [scores[2*k], scores[2*k + 1]]
return scores_dict
def _compute_model_score(self, input_ids, attention_mask, masked_token_ids, mask_pos):
# Compute the probabilities and ranks from the model
with torch.no_grad():
probs_n_ranks = self.model.compute_multiple_mono_token(input_ids, attention_mask, mask_pos, masked_token_ids)
# Compute scores
scores = probs_n_ranks[:,0] # drop rank
return scores
def _batch_compute_model_score(self, input_ids, attention_mask, masked_token_ids, mask_pos):
# Compute the probabilities and ranks from the model
with torch.no_grad():
probs = self.model.compute_batch_multiple_mono_token(input_ids, attention_mask, mask_pos, masked_token_ids)
return probs
def find_mask_pos(self, ids_seq):
        return torch.where(ids_seq == 103)[1]  # 103 is the [MASK] token id for BERT-style tokenizers
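# Hedged usage sketch (not part of the original file). The enclosing class name and
# its constructor are not visible in this excerpt, so `PromptScorer(...)` below is a
# placeholder; only the method names and the "HYPONYM---NOUN" key format mirror the
# code above, and the example words are illustrative.
#
#     scorer = PromptScorer(...)                       # hypothetical constructor
#     scorer.create_prompts()                          # build template / reverse-template pairs
#     scorer.compute_all_pairs_scores(
#         logical_words=["therefore", "so"],           # each must map to a single token
#         list_of_words=[["seagull", "bird"], ["oak", "tree"]],
#     )
#     pair_scores = scorer.all_pairs_scores_dict["seagull---bird"]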
|
StarcoderdataPython
|
8078361
|
#
# Open Source SAM-BA Programmer
# Copyright (C) <NAME>, 2016.
#
# dean [at] fourwalledcubicle [dot] com
# www.fourwalledcubicle.com
#
#
# Released under a MIT license, see LICENCE.txt.
from . import Transport
import logging
class Serial(Transport.TransportBase):
"""Serial transport for SAM-BA devices using a COM port."""
def __init__(self, port, baud=115200):
"""Constructs a Serial transport.
Args:
port : Serial port to open (e.g. "COM1" or "/dev/ttyACM0").
baud : Baud rate to use.
"""
try:
import serial
except ImportError as e:
self.LOG.fatal('Could not import pyserial library. Is it installed?')
raise e
self.serialport = serial.Serial(port=port,
baudrate=baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1)
def __del__(self):
"""Destructor for the Serial transport, closing all resources."""
try:
self.serialport.close()
except:
pass
def _to_byte_array(self, data):
"""Encodes an input string or list of values/characters into a flat
byte array of bytes. This can be used to convert a Unicode string
(using an ASCII only encoding) or list of characters and integers
into a flat set of bytes for transmission.
Args:
data : input data to convert
Returns:
Flat byte array.
"""
if isinstance(data, str):
return bytearray(data.encode('ascii', 'ignore'))
else:
return bytearray([ord(d) if isinstance(d, str) else d for d in data])
def read(self, length):
"""Reads a given number of bytes from the serial interface.
Args:
length : Number of bytes to read.
Returns:
Byte array of the received data.
Raises:
TimeoutError if the read operation timed out.
"""
data = self.serialport.read(length)
if len(data) != length:
raise Transport.TimeoutError()
self.LOG.debug('Receive %d bytes %s' % (len(data), [b for b in data]))
return bytearray(data)
def write(self, data):
"""Writes a given number of bytes to the serial interface.
Args:
data : Bytes to write.
"""
self.LOG.debug('Send %d bytes: %s' % (len(data), [b for b in data]))
self.serialport.write(self._to_byte_array(data))
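# Hedged usage sketch (not part of the original module). The port name is an
# assumption, and "V#" is the SAM-BA monitor's "display version" request, which is
# an assumption about the attached device rather than something defined in this file.
if __name__ == "__main__":
    transport = Serial(port="/dev/ttyACM0", baud=115200)
    transport.write("V#")
    try:
        print(transport.read(1))  # read a single reply byte, if any
    except Transport.TimeoutError:
        print("No response from the device")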
|
StarcoderdataPython
|
3536967
|
<filename>build_databases/build_global_power_plant_database.py
# This Python file uses the following encoding: utf-8
"""
Global Power Plant Database
build_global_power_plant_database.py
Builds the Global Power Plant Database from various data sources.
- Log build to DATABASE_BUILD_LOG_FILE
- Use country and fuel information as specified in powerplant_database.py
- Use matches/concordances as specified in powerplant_database.py
TO-DOS:
- Alias list for power plants
"""
import csv
import time
import argparse
import sys, os
sys.path.insert(0, os.pardir)
import powerplant_database as pw
### PARAMETERS ###
COUNTRY_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="COUNTRY-Database.bin")
WRI_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="WRI-Database.bin")
GEO_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="GEODB-Database.bin")
CARMA_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="CARMA-Database.bin")
DATABASE_CSV_SAVEFILE = pw.make_file_path(fileType="output", filename="global_power_plant_database.csv")
DATABASE_BUILD_LOG_FILE = pw.make_file_path(fileType="output", filename="database_build_log.txt")
DATABASE_CSV_DUMPFILE = pw.make_file_path(fileType="output", filename="global_power_plant_database_data_dump.csv")
MINIMUM_CAPACITY_MW = 1
parser = argparse.ArgumentParser()
parser.add_argument("--dump", help="dump all the data", action="store_true")
DATA_DUMP = True if parser.parse_args().dump else False
# open log file
f_log = open(DATABASE_BUILD_LOG_FILE, 'a')
f_log.write('Starting Global Power Plant Database build run at {0}.\n'.format(time.ctime()))
# print summary
print("Starting Global Power Plant Database build; minimum plant size: {0} MW.".format(MINIMUM_CAPACITY_MW))
# make country dictionary
country_dictionary = pw.make_country_dictionary()
# make powerplants dictionary
core_database = {}
datadump = {}
# make plant concordance dictionary
plant_concordance = pw.make_plant_concordance()
print("Loaded concordance file with {0} entries.".format(len(plant_concordance)))
carma_id_used = [] # Record matched carma_ids
# STEP 0: Read in source databases.
# Identify countries with automated data from .automated flag.
print("Loading source databases...")
country_databases = {}
for country_name, country in country_dictionary.iteritems():
if country.automated == 1:
country_code = country.iso_code
database_filename = COUNTRY_DATABASE_FILE.replace("COUNTRY", country_code)
country_databases[country_name] = pw.load_database(database_filename)
print("Loaded {0} plants from {1} database.".format(len(country_databases[country_name]), country_name))
# Load multi-country databases.
wri_database = pw.load_database(WRI_DATABASE_FILE)
print("Loaded {0} plants from WRI database.".format(len(wri_database)))
geo_database = pw.load_database(GEO_DATABASE_FILE)
print("Loaded {0} plants from GEO database.".format(len(geo_database)))
carma_database = pw.load_database(CARMA_DATABASE_FILE)
print("Loaded {0} plants from CARMA database.".format(len(carma_database)))
# Track counts using a dict with keys corresponding to each data source
db_sources = country_databases.keys()
db_sources.extend(["WRI","GEO","WRI with GEO lat/long data","WRI with CARMA lat/long data"])
database_additions = {dbname: {'count': 0, 'capacity': 0} for dbname in db_sources}
# STEP 1: Add all data (capacity >= 1MW) from countries with automated data to the Database
for country_name, database in country_databases.iteritems():
country_code = country_dictionary[country_name].iso_code
print("Adding plants from {0}.".format(country_dictionary[country_name].primary_name))
for plant_id, plant in database.iteritems():
datadump[plant_id] = plant
if plant.capacity >= MINIMUM_CAPACITY_MW:
if (plant.location.latitude and plant.location.longitude) and (plant.location.latitude != 0 and plant.location.longitude != 0):
core_database[plant_id] = plant
database_additions[country_name]['count'] += 1
database_additions[country_name]['capacity'] += plant.capacity
else:
plant.idnr = plant_id + u",No"
else:
plant.idnr = plant_id + u",No"
# STEP 2: Go through WRI database and triage plants
print("Adding plants from WRI internal database.")
for plant_id, plant in wri_database.iteritems():
# Cases to skip
if not isinstance(plant, pw.PowerPlant):
f_log.write('Error: plant {0} is not a PowerPlant object.\n'.format(plant_id))
continue
if plant.country not in country_dictionary.keys():
f_log.write('Error: country {0} not recognized.\n'.format(plant.country))
continue
# Skip plants with data loaded from an automated script
if country_dictionary[plant.country].automated:
continue
# Skip plants in countries where we will use GEO data
if country_dictionary[plant.country].use_geo:
continue
# skip plants in countries where WRI-collected data is already handled in an automated script
if country_dictionary[plant.country].wri_data_built_in:
continue
datadump[plant_id] = plant
# Skip plants below minimum capacity cutoff
if plant.capacity < MINIMUM_CAPACITY_MW:
continue
# STEP 2.1: If plant has lat/long information, add it to the Database
if (plant.location.latitude and plant.location.longitude) and (plant.location.latitude != 0 and plant.location.longitude != 0):
plant.idnr = plant_id
#plant.coord_source = u"WRI data"
core_database[plant_id] = plant
database_additions['WRI']['count'] += 1
database_additions['WRI']['capacity'] += plant.capacity
continue
# STEP 2.2: If plant is matched to GEODB, add to the Database using GEODB lat/long
if plant_id in plant_concordance:
matching_geo_id = plant_concordance[plant_id]['geo_id']
if matching_geo_id:
try:
plant.location = geo_database[matching_geo_id].location
except:
f_log.write("Matching error: no GEO location for WRI plant {0}, GEO plant {1}\n".format(plant_id, matching_geo_id))
continue
if plant.location.latitude and plant.location.longitude:
plant.idnr = plant_id
plant.coord_source = u"GEODB"
core_database[plant_id] = plant
database_additions["WRI with GEO lat/long data"]['count'] += 1
database_additions["WRI with GEO lat/long data"]['capacity'] += plant.capacity
continue
# STEP 2.3: If plant is matched to CARMA, add to the Database using CARMA lat/long
if plant_id in plant_concordance:
matching_carma_id = plant_concordance[plant_id]['carma_id']
if matching_carma_id:
try:
plant.location = carma_database[matching_carma_id].location
except:
f_log.write("Matching error: no CARMA location for WRI plant {0}, CARMA plant {1}\n".format(plant_id,matching_carma_id))
continue
if plant.location.latitude and plant.location.longitude:
plant.idnr = plant_id
plant.coord_source = u"CARMA"
core_database[plant_id] = plant
carma_id_used.append(matching_carma_id)
database_additions["WRI with CARMA lat/long data"]['count'] += 1
database_additions["WRI with CARMA lat/long data"]['capacity'] += plant.capacity
continue
# Note: Would eventually like to refine CARMA locations - known to be inaccurate in some cases
# STEP 3: Go through GEO database and add plants from small countries
# Plants in this database only have numeric ID (no prefix) because of concordance matching
for plant_id,plant in geo_database.iteritems():
# Catch errors if plants do not have a correct country assigned
datadump[plant_id] = plant
if plant.country not in country_dictionary.keys():
print("Plant {0} has country {1} - not found.".format(plant_id,plant.country))
continue
if country_dictionary[plant.country].use_geo:
        if plant.capacity < MINIMUM_CAPACITY_MW:
continue
if (plant.location.latitude and plant.location.longitude) and (plant.location.latitude != 0 and plant.location.longitude != 0):
#plant.coord_source = u"GEO"
plant.idnr = plant_id
try:
database_additions['GEO']['capacity'] += plant.capacity
except:
f_log.write("Attribute Warning: GEO plant {0} does not have valid capacity information <{1}>\n".format(plant_id, plant.capacity))
else:
core_database[plant_id] = plant
database_additions['GEO']['count'] += 1
# STEP 3.1: Append another multinational database
wiki_solar_file = pw.make_file_path(fileType="raw", subFolder="Wiki-Solar", filename="wiki-solar-plant-additions-2019.csv")
country_lookup = {cc.iso_code: cc.primary_name for cc in country_dictionary.values()}
# FIXME: patch lookup with additional geographies relevant in the wikisolar dataset
country_lookup.update({
# Bonaire, Sint Eustatius and Saba
"BES": "Netherlands",
# Cayman Islands
"CYM": "United Kingdom",
# Puerto Rico
"PRI": "United States of America",
# Reunion
"REU": "France",
# The Virgin Islands of the United States
"VIR": "United States of America",
})
wiki_solar_skip = {
'United States of America': (0, 0)
}
wiki_solar_whitelist = ['PRI']
wiki_solar_count = 0
with open(wiki_solar_file) as fin:
wiki_solar = csv.DictReader(fin)
for solar_plant in wiki_solar:
country = country_lookup.get(solar_plant['country'], '')
plant_idnr = 'WKS{0:07d}'.format(int(solar_plant['id']))
plant_location = pw.LocationObject(latitude=float(solar_plant['lat']), longitude=float(solar_plant['lon']))
plant = pw.PowerPlant(
plant_idnr=plant_idnr,
plant_name=solar_plant['name'],
plant_country=country,
plant_capacity=float(solar_plant['capacity']),
plant_location=plant_location,
plant_coord_source='Wiki-Solar',
plant_source='Wiki-Solar',
plant_source_url='https://www.wiki-solar.org',
plant_primary_fuel = 'Solar'
)
if (country in wiki_solar_skip) and \
(solar_plant["country"] not in wiki_solar_whitelist):
_n, _capacity = wiki_solar_skip[country]
wiki_solar_skip[country] = (_n + 1, _capacity + plant.capacity)
continue
core_database[plant_idnr] = plant
wiki_solar_count += 1
print("Loaded {0} plants from Wiki-Solar database.".format(wiki_solar_count))
for _country, _vals in wiki_solar_skip.iteritems():
if _vals[0] != 0:
print("...skipped {0} plants ({1} MW) for {2}.".format(_vals[0], _vals[1], _country))
# STEP 3.9: Add in multinational generation datasets
COUNTRY_DATABASE_FILE = pw.make_file_path(fileType="src_bin", filename="COUNTRY-Database.bin")
JRC_OPEN_PERFORMANCE = pw.make_file_path('raw', 'JRC-PPDB-OPEN', 'JRC_OPEN_PERFORMANCE.csv')
JRC_OPEN_UNITS = pw.make_file_path('raw', 'JRC-PPDB-OPEN', 'JRC_OPEN_UNITS.csv')
JRC_OPEN_LINKAGES = pw.make_file_path('raw', 'JRC-PPDB-OPEN', 'JRC_OPEN_LINKAGES.csv')
JRC_OPEN_TEMPORAL = pw.make_file_path('raw', 'JRC-PPDB-OPEN', 'JRC_OPEN_TEMPORAL.csv')
JRC_BLACKLIST = set([
# blacklist created looking at obviously-wrong matches based on country designation
# eic_g, # bad_wri_id
'50WG00000001097W', # 'BRA0030768'
'48W000000SUTB-1P', # 'USA0060878'
'26WUCNTRLDSCND24', # 'CAN0008429'
'26WUCNTRLDSCND16', # 'CAN0008429'
'50WG000000019861', # 'BRA0029858'
'50WG000000019853', # 'BRA0029858'
'50WGI00000019875', # 'BRA0029858'
'48W000000ROOS-1P', # 'USA0006202'
])
# {wri_id: [eic_g_1, eic_g_2, ...], ...}
gppd_ppdb_link = {}
with open(JRC_OPEN_LINKAGES) as fin:
r = csv.DictReader(fin)
for row in r:
wri_id = row['WRI_id']
gen_id = row['eic_g']
if gen_id: # some blank gen_ids, which currently don't have wri_id matches
gppd_ppdb_link[wri_id] = gppd_ppdb_link.get(wri_id, []) + [gen_id]
# {yr: {eic_g: (gen, time_coverage), ...}, ...}
ppdb_generation = {str(yr): {} for yr in [2015, 2016, 2017, 2018]}
with open(JRC_OPEN_TEMPORAL) as fin:
r = csv.DictReader(fin)
skipped_generation = 0
for row in r:
year_data = ppdb_generation[row['cyear']]
# value is in MWh according to `datapackage.json` in JRC-PPDB-OPEN
year_data[row['eic_g']] = (row['Generation'], row['time_coverage'])
# desired lookup structure: {plant1: {year1: val, year2: val2, ...}, ...}
agg_gen_by_gppd = {}
# per-unit time availability
time_threshold = '0.950'  # deliberately a string: compared lexicographically against the time_coverage strings from JRC_OPEN_TEMPORAL
# WRI plants that aren't having the estimation applied [(plant1, yearA), ...]
jrc_skipped_plants = []
for wri_id, gen_ids in gppd_ppdb_link.items():
plant_totals = {}
for year in map(str, [2015, 2016, 2017]):
year_data = ppdb_generation[year]
year_gen_val = 0
accepted_gen_ids = []
for gen_id in gen_ids:
gen, time_coverage = year_data.get(gen_id, (0, '0.000'))
if time_coverage < time_threshold or gen_id in JRC_BLACKLIST:
jrc_skipped_plants.append((wri_id, int(year)))
break
year_gen_val += float(gen)
accepted_gen_ids.append(gen_id)
if set(accepted_gen_ids) == set(gen_ids):
# convert MWh to GWh and assign value for the year
plant_totals[int(year)] = year_gen_val / 1000
agg_gen_by_gppd[wri_id] = plant_totals
for pid, pp in core_database.items():
if agg_gen_by_gppd.get(pid, {}):
new_generation = []
for yr, val in agg_gen_by_gppd[pid].items():
gen = pw.PlantGenerationObject.create(val, year=yr, source='JRC-PPDB-OPEN')
new_generation.append(gen)
if new_generation:
pp.generation = new_generation
#print("Added {0} plants ({1} MW) from {2}.".format(data['count'], data['capacity'], dbname))
# STEP 4: Estimate generation for plants without reported generation for target year
count_plants_with_generation = 0
#for plant_id,plant in core_database.iteritems():
# if plant.generation != pw.NO_DATA_OTHER:
# count_plants_with_generation += 1
#print('Of {0} total plants, {1} have reported generation data.'.format(len(core_database),count_plants_with_generation))
print('Estimating generation...')
estimated_plants = pw.estimate_generation(core_database)
print('...estimated for {0} plants.'.format(estimated_plants))
# STEP 4.1: Add WEPP ID matches
pw.add_wepp_id(core_database)
if DATA_DUMP:
pw.add_wepp_id(datadump)
# STEP 5: Write the Global Power Plant Database
for dbname, data in database_additions.iteritems():
print("Added {0} plants ({1} MW) from {2}.".format(data['count'], data['capacity'], dbname))
f_log.close()
print("Loaded {0} plants to the Global Power Plant Database.".format(len(core_database)))
pw.write_csv_file(core_database, DATABASE_CSV_SAVEFILE)
print("Global Power Plant Database built.")
# STEP 6: Dump Data
if DATA_DUMP:
print("Dumping all the data...")
# STEP 6.1: Label plants in datadump
pw_idnrs = core_database.keys()
for plant_id,plant in datadump.iteritems():
if plant_id in pw_idnrs:
plant.idnr = plant_id + ",Yes"
else:
plant.idnr = plant_id + ",No"
# STEP 6.2: Add unused CARMA plants
for plant_id,plant in carma_database.iteritems():
plant.coord_source = u"CARMA data"
if plant_id in carma_id_used:
continue
else:
plant.idnr = plant_id + ",No"
datadump[plant_id] = plant
print("Dumped {0} plants.".format(len(datadump)))
pw.write_csv_file(datadump, DATABASE_CSV_DUMPFILE,dump=True)
print("Data dumped.")
print("Finished.")
|
StarcoderdataPython
|
3353603
|
<reponame>namanh11611/FlaskCrypto
import logging
import os
import platform
import socket
import sys
import tempfile
import time
import zipfile
from http import HTTPStatus
from urllib.request import urlopen
import yaml
from pyngrok.exception import PyngrokNgrokInstallError, PyngrokSecurityError, PyngrokError
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__version__ = "5.0.0"
logger = logging.getLogger(__name__)
CDN_URL_PREFIX = "https://bin.equinox.io/c/4VmDzA7iaHb/"
PLATFORMS = {
"darwin_x86_64": CDN_URL_PREFIX + "ngrok-stable-darwin-amd64.zip",
"darwin_i386": CDN_URL_PREFIX + "ngrok-stable-darwin-386.zip",
"windows_x86_64": CDN_URL_PREFIX + "ngrok-stable-windows-amd64.zip",
"windows_i386": CDN_URL_PREFIX + "ngrok-stable-windows-386.zip",
"linux_x86_64_arm": CDN_URL_PREFIX + "ngrok-stable-linux-arm64.zip",
"linux_i386_arm": CDN_URL_PREFIX + "ngrok-stable-linux-arm.zip",
"linux_i386": CDN_URL_PREFIX + "ngrok-stable-linux-386.zip",
"linux_x86_64": CDN_URL_PREFIX + "ngrok-stable-linux-amd64.zip",
"freebsd_x86_64": CDN_URL_PREFIX + "ngrok-stable-freebsd-amd64.zip",
"freebsd_i386": CDN_URL_PREFIX + "ngrok-stable-freebsd-386.zip",
"cygwin_x86_64": CDN_URL_PREFIX + "ngrok-stable-windows-amd64.zip",
}
DEFAULT_DOWNLOAD_TIMEOUT = 6
DEFAULT_RETRY_COUNT = 0
_config_cache = None
_print_progress_enabled = True
def get_ngrok_bin():
"""
Get the ``ngrok`` executable for the current system.
:return: The name of the ``ngrok`` executable.
:rtype: str
"""
system = platform.system().lower()
if system in ["darwin", "linux", "freebsd"]:
return "ngrok"
elif system in ["windows", "cygwin"]: # pragma: no cover
return "ngrok.exe"
else: # pragma: no cover
raise PyngrokNgrokInstallError("\"{}\" is not a supported platform".format(system))
def install_ngrok(ngrok_path, **kwargs):
"""
Download and install the latest ``ngrok`` for the current system, overwriting any existing contents
at the given path.
:param ngrok_path: The path to where the ``ngrok`` binary will be downloaded.
:type ngrok_path: str
:param kwargs: Remaining ``kwargs`` will be passed to :func:`_download_file`.
:type kwargs: dict, optional
"""
logger.debug(
"Installing ngrok to {}{} ...".format(ngrok_path, ", overwriting" if os.path.exists(ngrok_path) else ""))
ngrok_dir = os.path.dirname(ngrok_path)
if not os.path.exists(ngrok_dir):
os.makedirs(ngrok_dir)
arch = "x86_64" if sys.maxsize > 2 ** 32 else "i386"
if platform.uname()[4].startswith("arm") or \
platform.uname()[4].startswith("aarch64"):
arch += "_arm"
system = platform.system().lower()
if "cygwin" in system:
system = "cygwin"
plat = system + "_" + arch
try:
url = PLATFORMS[plat]
logger.debug("Platform to download: {}".format(plat))
except KeyError:
raise PyngrokNgrokInstallError("\"{}\" is not a supported platform".format(plat))
try:
download_path = _download_file(url, **kwargs)
_install_ngrok_zip(ngrok_path, download_path)
except Exception as e:
raise PyngrokNgrokInstallError("An error occurred while downloading ngrok from {}: {}".format(url, e))
def _install_ngrok_zip(ngrok_path, zip_path):
"""
Extract the ``ngrok`` zip file to the given path.
:param ngrok_path: The path where ``ngrok`` will be installed.
:type ngrok_path: str
:param zip_path: The path to the ``ngrok`` zip file to be extracted.
:type zip_path: str
"""
_print_progress("Installing ngrok ... ")
with zipfile.ZipFile(zip_path, "r") as zip_ref:
logger.debug("Extracting ngrok binary to {} ...".format(zip_path))
zip_ref.extractall(os.path.dirname(ngrok_path))
os.chmod(ngrok_path, int("777", 8))
_clear_progress()
def get_ngrok_config(config_path, use_cache=True):
"""
Get the ``ngrok`` config from the given path.
:param config_path: The ``ngrok`` config path to read.
:type config_path: str
    :param use_cache: Use the cached version of the config (if populated).
:type use_cache: bool
:return: The ``ngrok`` config.
:rtype: dict
"""
global _config_cache
if not _config_cache or not use_cache:
with open(config_path, "r") as config_file:
config = yaml.safe_load(config_file)
if config is None:
config = {}
_config_cache = config
return _config_cache
def install_default_config(config_path, data=None):
"""
Install the given data to the ``ngrok`` config. If a config is not already present for the given path, create one.
Before saving new data to the default config, validate that they are compatible with ``pyngrok``.
:param config_path: The path to where the ``ngrok`` config should be installed.
:type config_path: str
    :param data: A dictionary of things to be added to the default config.
:type data: dict, optional
"""
if data is None:
data = {}
config_dir = os.path.dirname(config_path)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
if not os.path.exists(config_path):
open(config_path, "w").close()
config = get_ngrok_config(config_path, use_cache=False)
config.update(data)
validate_config(config)
with open(config_path, "w") as config_file:
logger.debug("Installing default ngrok config to {} ...".format(config_path))
yaml.dump(config, config_file)
def validate_config(data):
"""
Validate that the given dict of config items are valid for ``ngrok`` and ``pyngrok``.
:param data: A dictionary of things to be validated as config items.
:type data: dict
"""
if data.get("web_addr", None) is False:
raise PyngrokError("\"web_addr\" cannot be False, as the ngrok API is a dependency for pyngrok")
elif data.get("log_format") == "json":
raise PyngrokError("\"log_format\" must be \"term\" to be compatible with pyngrok")
elif data.get("log_level", "info") not in ["info", "debug"]:
raise PyngrokError("\"log_level\" must be \"info\" to be compatible with pyngrok")
def _download_file(url, retries=0, **kwargs):
"""
Download a file to a temporary path and emit a status to stdout (if possible) as the download progresses.
:param url: The URL to download.
:type url: str
:param retries: The number of retries to attempt, if download fails.
:type retries: int, optional
:param kwargs: Remaining ``kwargs`` will be passed to :py:func:`urllib.request.urlopen`.
:type kwargs: dict, optional
:return: The path to the downloaded temporary file.
:rtype: str
"""
kwargs["timeout"] = kwargs.get("timeout", DEFAULT_DOWNLOAD_TIMEOUT)
if not url.lower().startswith("http"):
raise PyngrokSecurityError("URL must start with \"http\": {}".format(url))
try:
_print_progress("Downloading ngrok ...")
logger.debug("Download ngrok from {} ...".format(url))
local_filename = url.split("/")[-1]
response = urlopen(url, **kwargs)
status_code = response.getcode()
if status_code != HTTPStatus.OK:
logger.debug("Response status code: {}".format(status_code))
return None
length = response.getheader("Content-Length")
if length:
length = int(length)
chunk_size = max(4096, length // 100)
else:
chunk_size = 64 * 1024
download_path = os.path.join(tempfile.gettempdir(), local_filename)
with open(download_path, "wb") as f:
size = 0
while True:
buffer = response.read(chunk_size)
if not buffer:
break
f.write(buffer)
size += len(buffer)
if length:
percent_done = int((float(size) / float(length)) * 100)
_print_progress("Downloading ngrok: {}%".format(percent_done))
_clear_progress()
return download_path
except socket.timeout as e:
if retries < DEFAULT_RETRY_COUNT:
time.sleep(0.5)
return _download_file(url, retries + 1, **kwargs)
else:
raise e
def _print_progress(line):
if _print_progress_enabled:
sys.stdout.write("{}\r".format(line))
sys.stdout.flush()
def _clear_progress(spaces=100):
if _print_progress_enabled:
sys.stdout.write((" " * spaces) + "\r")
sys.stdout.flush()
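# Hedged usage sketch (not part of the original module): download ngrok into a
# temporary directory and write a minimal default config next to it. The demo
# directory and the {"region": "us"} entry are illustrative assumptions.
if __name__ == "__main__":
    demo_dir = os.path.join(tempfile.gettempdir(), "pyngrok-demo")
    demo_ngrok_path = os.path.join(demo_dir, get_ngrok_bin())
    demo_config_path = os.path.join(demo_dir, "ngrok.yml")
    install_ngrok(demo_ngrok_path)
    install_default_config(demo_config_path, {"region": "us"})
    print(get_ngrok_config(demo_config_path))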
|
StarcoderdataPython
|
1846324
|
import numpy as np
import torch
import os
import sys
from matplotlib import pyplot as plt
import torch.nn as nn
from xplain.attr import LayerIntegratedGradients, LayerGradientXActivation
import skimage.io
import torchvision
import pickle
import pandas as pd
import scipy.interpolate as interpolate
from torch.utils.data import TensorDataset, DataLoader
import helper
import argparse
from tqdm import tqdm
import warnings
from helper import pre_process_image
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn.functional as F
warnings.filterwarnings("ignore")
# attribute_to_layer_input=True,
def densenet_feat_preprocess(x):
out = F.relu(x, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = torch.flatten(out, 1)
return out
def shufflenet_feat_preprocess(x):
x = x.mean([2, 3]) # globalpool
return x
def inception_feat_preprocess(x):
x = F.adaptive_avg_pool2d(x, (1, 1))
# N x 2048 x 1 x 1
x = F.dropout(x, training=False)
# N x 2048 x 1 x 1
x = torch.flatten(x, 1)
return x
model_to_layer_dict = {
"DenseNet": ("features.norm5", None, True),
"ResNet": ("avgpool", None, False),
"VGG": ("classifier", 4, False),
"GoogLeNet": ("dropout", None, False),
# "Inception3": ("dropout", None, False),
"Inception3": ("Mixed_7c", None, True),
"SqueezeNet": ("features", None, False),
"ShuffleNetV2": ("conv5", 2, True),
"MobileNetV2": ("classifier", 0, False),
}
model_to_feature_dict = {
"DenseNet": ("classifier", None, densenet_feat_preprocess),
"ResNet": ("fc", None, None),
"VGG": ("classifier", 6, None),
"GoogLeNet": ("fc", None, None),
# "Inception3": ("fc", None, None),
"Inception3": ("fc", None, inception_feat_preprocess),
"SqueezeNet": ("classifier", None, None),
"ShuffleNetV2": ("fc", None, shufflenet_feat_preprocess),
"MobileNetV2": ("classifier", 1, None),
}
def get_layer(model, layer_name, layer_index):
_layer_names = layer_name.split(".")
en = model
for lm in _layer_names:
en = getattr(en, lm)
if layer_index is not None:
en = en[layer_index]
return en
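# Hedged sketch (illustrative, not part of the original script): how get_layer
# resolves the (layer_name, layer_index) entries above for stock torchvision models.
# Defined but not called, so the script's command-line behaviour is unchanged.
def _example_get_layer():
    resnet = torchvision.models.resnet18()
    layer_name, layer_index, _ = model_to_layer_dict["ResNet"]    # ("avgpool", None, False)
    pool = get_layer(resnet, layer_name, layer_index)             # -> resnet.avgpool
    vgg = torchvision.models.vgg16()
    feat_name, feat_index, _ = model_to_feature_dict["VGG"]       # ("classifier", 6, None)
    head = get_layer(vgg, feat_name, feat_index)                  # -> vgg.classifier[6]
    return pool, head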
def get_dl(file_list, round):
labels = torch.tensor([int(en.split("/")[-1].split("_")[1]) for en in file_list])
img_batch_load = []
for img in file_list:
img = pre_process_image(img, round=round)
batch_data = torch.FloatTensor(img)
img_batch_load.append(batch_data)
img_batch = torch.stack(img_batch_load, 0).squeeze()
dataset = TensorDataset(torch.Tensor(img_batch), labels)
dl = DataLoader(dataset, batch_size=2)
return dl
def identify_bad_neurons(target, attribution, logits_per_class):
tmp = []
for cls in range(num_cls):
if cls == target:
continue
_idx = logits_per_class[cls].argsort()[::-1][1:3]
# if _idx != target:
# if not target in _idx:
# continue
_idx = (labels == cls).nonzero()[0]
# import ipdb; ipdb.set_trace()
# attribution_mean = attribution[_idx].mean(0)[..., target]
# attribution_mean = attribution_mean.flatten()
if attribution.ndim > 3:
attribution = attribution.mean(-2).mean(-2)
assert attribution.ndim == 3, "Check size of attribution"
attribution_mean = attribution[_idx].mean(0)[:, target]
_idx = attribution_mean > 0
try:
thresh = np.percentile(attribution_mean[_idx], 20)
except:
# If all attributions are < 0
thresh = 0
attribution_mean[attribution_mean < thresh] = 0
tmp.append(attribution_mean)
assert np.mean(tmp, 0).ndim == 1, "bad neurons have ndim > 1"
bad_neurons = np.mean(tmp, 0).argsort()[::-1].tolist()
assert bad_neurons
return bad_neurons
def ablation_plot(dataloader, bad_neurons, target, activation_value=25):
acc_all = []
nn_all = []
N = int(NUM_NEURONS)
for nn in range(0, N, 2):
pred = []
gnd = []
logits_clean = []
for data in feat_dl:
_feat, label = data
_feat = _feat.to(device)
# if _feat.ndim != 2:
# _feat_shape = _feat.shape
# _feat_f = _feat.view(_feat_shape[0], -1)
# _feat_f[:, bad_neurons[:nn]] = activation_value
# _feat = _feat_f.view(*_feat_shape)
# else:
# _feat[:, bad_neurons[:nn]] = activation_value
if feat_preprocess is not None:
_feat = feat_preprocess(_feat)
if _feat.ndim > 2:
_feat[:, bad_neurons[:nn], ...] = activation_value
else:
_feat[:, bad_neurons[:nn]] = activation_value
logits = _feat_layer(_feat).squeeze()
logits_clean.append(logits.data.cpu().numpy())
pred.append(logits.argmax(1).data.cpu().numpy())
gnd.append(label.numpy())
logits_clean = np.vstack(logits_clean)
acc = np.mean(np.hstack(gnd) == np.hstack(pred)) * 100
acc_all.append(acc)
nn_all.append(int(nn / NUM_NEURONS * 100))
kk = 0
# % neurons where perf = P
f = interpolate.interp1d(acc_all, nn_all)
try:
P = 20
position_0 = f(P)
except:
position_0 = 0
try:
P = 40
position_1 = f(P)
except:
position_1 = 0
if target < 12:
plt.plot(nn_all, acc_all)
plt.plot(nn_all, 20 * np.ones((len(nn_all))))
plt.plot(nn_all, 40 * np.ones((len(nn_all))), color="red")
plt.ylabel("Accuracy")
plt.xlabel("Percentage of neurons triggered in the layer")
plt.title(f"Ablation for class {target}, Position={position_1}")
print(target, ":", position_0, position_1)
return acc_all, nn_all, position_1
def forward_fn(
model, dataloader, compute_attribution=True, use_internal_batch_size=True
):
pred = []
gnd = []
logits = []
attribution = []
labels = []
feat = []
for data in tqdm(dataloader):
img, label = data
labels.append(label)
model(img.to(device))
_feat = hook_fn_feat_layer.outputs
if isinstance(_feat, list):
_feat = _feat[0]
feat.append(_feat.data.cpu().numpy())
# import ipdb; ipdb.set_trace()
if feat_preprocess is not None:
_feat = feat_preprocess(_feat)
_logits = _feat_layer(_feat).squeeze()
logits.append(_logits.data.cpu().numpy())
pred.append(_logits.argmax(1).data.cpu().numpy())
gnd.append(label.numpy())
if compute_attribution:
_attrib = []
for c in range(num_cls):
if use_internal_batch_size:
_atr = attrib_fn.attribute(
img.to(device),
target=torch.Tensor([c, c]).to(device).long(),
internal_batch_size=4,
attribute_to_layer_input=attribute_to_layer_input,
)
else:
_atr = attrib_fn.attribute(
img.to(device),
target=torch.Tensor([c, c]).to(device).long(),
attribute_to_layer_input=attribute_to_layer_input,
)
if isinstance(_atr, tuple):
_atr = _atr[0]
_attrib.append(_atr.unsqueeze(-1).cpu().data.numpy())
attribution.append(np.concatenate(_attrib, axis=-1))
logits = np.vstack(logits)
labels = np.hstack(labels)
attribution = np.vstack(attribution)
attribution = np.squeeze(attribution)
feat = np.vstack(feat)
acc = np.mean(np.hstack(gnd) == np.hstack(pred)) * 100
print("Accuracy is ", acc)
print("feat_shape: ", feat.shape)
print("attr_shape: ", attribution.shape)
return logits, labels, attribution, feat, acc
# def get_feature(meta_idx, model_dir, meta_data):
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get attribution")
# parser.add_argument("--meta_idx", type=int, default=439)
parser.add_argument("--model_name", type=str, default="id-00000823")
parser.add_argument(
"--model_dir",
type=str,
default="/data/SRI/projects_2020/trojAI/round2-dataset-train",
)
parser.add_argument(
"--meta_data",
type=str,
default="/data/SRI/projects_2020/trojAI/round2-dataset-train/METADATA.csv",
)
parser.add_argument("--results_dir", type=str, default="curve_features")
parser.add_argument("--attributions_dir", type=str, default="attribution_features")
parser.add_argument("--device", type=str, default="cuda")
parser.add_argument("--mult_factor", type=int, default=2)
parser.add_argument("--round", type=int, default=2)
parser.add_argument(
"--attribution_fn", type=str, default="IG", choices=["IG", "GradxAct"]
)
args = parser.parse_args()
device = args.device
# meta_idx = args.meta_idx
model_dir = args.model_dir
meta_data = args.meta_data
MULT_FACTOR = args.mult_factor
# meta_idx = 64 #inceptionv3
# meta_idx = 371 #densenet121
# meta_idx = 269 #densenet161
# meta_idx = 342 #densenet169
# meta_idx = 205 #densenet201
# meta_idx = 489 # shufflenet1_0
# meta_idx = 463 #shufflenet1_5
# meta_idx = 152 #shufflenet2_0
# meta_idx = 272 #squeezenetv1_0 .. acc not coming
# meta_idx = 18 #squeezenetv1_1
meta_data = pd.read_csv(meta_data)
# model_info = meta_data.loc[meta_idx]
model_info = meta_data[meta_data["model_name"] == args.model_name]
model_name = model_info.model_name.item()
model_curr_dir = os.path.join(model_dir, model_name)
model_filepath = os.path.join(model_curr_dir, "model.pt")
model = torch.load(model_filepath, map_location=device)
model = model.eval()
# print (model)
num_cls = model_info.number_classes.item()
tt = type(model).__name__
# print(model_info.model_architecture)
# print(tt)
info = model_to_layer_dict.get(tt)
layer_name = info[0]
layer_index = info[1]
attribute_to_layer_input = info[2]
_layer = get_layer(model, layer_name, layer_index)
# print (layer_name)
# print (_layer)
hook_fn_feat_layer = helper.hook_fn_nn()
_layer.register_forward_hook(hook_fn_feat_layer)
info = model_to_feature_dict.get(tt)
layer_name = info[0]
layer_index = info[1]
feat_preprocess = info[2]
_feat_layer = get_layer(model, layer_name, layer_index)
if args.attribution_fn == "IG":
attribution_fn = LayerIntegratedGradients
use_internal_batch_size = True
elif args.attribution_fn == "GradxAct":
attribution_fn = LayerGradientXActivation
use_internal_batch_size = False
if attribute_to_layer_input:
attrib_fn = attribution_fn(model, _feat_layer)
else:
attrib_fn = attribution_fn(model, _layer)
# print (model)
# print (_layer)
# print (_feat_layer)
# x = torch.rand((2, 3, 224, 224)).to(device)
# l = model(x)
# print (l.shape)
# f = hook_fn_feat_layer.outputs
# print (f.shape)
# print (attribute_to_layer_input)
# a = attrib_fn.attribute(x,target=torch.Tensor([0,0]).to(device).long(),
# internal_batch_size=4, attribute_to_layer_input=attribute_to_layer_input)
##import ipdb; ipdb.set_trace()
# print (a.shape)
# if feat_preprocess is not None:
# f = feat_preprocess(f)
# o = _feat_layer(f)
# print (o.shape)
# exit()
if args.round == 3:
clean_image_dir = os.path.join(model_curr_dir, "clean_example_data")
else:
clean_image_dir = os.path.join(model_curr_dir, "example_data")
clean_images = [
os.path.join(clean_image_dir, en)
for en in os.listdir(clean_image_dir)
if en.endswith(".png")
]
# dataloader = get_dl(clean_images[:10])
dataloader = get_dl(clean_images, round=args.round)
attribution_path = os.path.join(args.attributions_dir, "{}.npz".format(model_name))
if os.path.exists(attribution_path):
# if False:
data = np.load(attribution_path, allow_pickle=True)
logits = data["logits"]
labels = data["labels"]
attribution = data["attribution"]
feat = data["feat"]
acc = data["acc"]
print("Accuracy is ", acc)
print("feat_shape: ", feat.shape)
print("attr_shape: ", attribution.shape)
else:
logits, labels, attribution, feat, acc = forward_fn(
model, dataloader, use_internal_batch_size=use_internal_batch_size
)
np.savez(
attribution_path,
logits=logits,
labels=labels,
attribution=attribution,
feat=feat,
acc=acc,
)
sys.exit()
feat_ds = TensorDataset(torch.from_numpy(feat), torch.from_numpy(labels))
feat_dl = DataLoader(feat_ds, batch_size=8)
logits_per_class = []
for i in range(num_cls):
idx = (labels == i).nonzero()[0]
logits_per_class.append(logits[idx].mean(0))
logits_per_class = np.asarray(logits_per_class)
NUM_NEURONS = feat.shape[1]
res_file = os.path.join(args.results_dir, "{}.pkl".format(model_name))
print(res_file)
# if not os.path.exists(res_file):
if True:
fig = plt.figure(figsize=[20, 20])
acc_ablation = []
position = []
# ipdb.set_trace()
M = feat.mean(0).max() * MULT_FACTOR
print("Using activation value", M)
for target in range(num_cls):
print(f"Running ablation for class {target}/{num_cls} ")
if target < 12:
ax = plt.subplot(4, 3, target + 1)
bad_neurons = identify_bad_neurons(target, attribution, logits_per_class)
_acc, nn_all, pos = ablation_plot(
dataloader, bad_neurons, target, activation_value=M
)
position.append(pos)
acc_ablation.append(_acc)
pickle.dump((acc_ablation, nn_all, position), open(res_file, "wb"))
position = np.asarray(position)
print(f"Poisoned class is {position.argmin()} with {position.min()}")
plt.savefig(
os.path.join(
args.results_dir,
"{}_{}.jpg".format(model_name, model_info.model_architecture.item()),
)
)
print(f"Finished model {model_name}")
|
StarcoderdataPython
|
343849
|
#
# Copyright (c) 2013, Prometheus Research, LLC
# Released under MIT license, see `LICENSE` for details.
#
import itertools
import types
class registry:
# Stores registered test types and respective record types.
case_types = []
input_types = []
output_types = []
class FieldSpec(object):
# Record field specification.
def __init__(self, attr, key, check=None, default=None,
order=0, required=False, hint=None):
self.attr = attr # attribute name (for Python code)
self.key = key # key name (for YAML documents)
self.check = check # expected type
self.default = default # default value
self.order = order # relative order
self.required = required # mandatory or not
self.hint = hint # one line description
def __repr__(self):
return "%s(attr=%r, key=%r, check=%r, default=%r," \
" order=%r, required=%r, hint=%r)" \
% (self.__class__.__name__,
self.attr, self.key, self.check, self.default,
self.order, self.required, self.hint)
class Field(object):
"""Record field descriptor."""
CTR = itertools.count(1)
REQ = object()
def __init__(self, check=None, default=REQ, order=None, hint=None):
self.check = check # expected type
self.default = default # default value or mandatory field
self.order = order or next(self.CTR) # relative order
self.hint = hint # one line description
def __get__(self, instance, owner):
if instance is None:
return self
raise AttributeError("unset test field")
class RecordMetaclass(type):
def __new__(mcls, name, bases, members):
# Nothing to do if fields are processed already.
if '__fields__' in members:
return type.__new__(mcls, name, bases, members)
# Gather fields from base classes.
fields = set()
for base in bases:
if '__fields__' in base.__dict__:
fields.update(base.__fields__)
# Find and process field descriptors in the class dictionary.
keys = set(field.key for field in fields)
for attr in sorted(members):
dsc = members[attr]
if not isinstance(dsc, Field):
continue
del members[attr]
key = attr
if (key.endswith('_') and
not (key.startswith('_') or key.endswith('__'))):
key = key[:-1]
key = key.replace('_', '-')
assert key not in keys, \
"duplicate field %r" % key
keys.add(key)
check = dsc.check
default = dsc.default
order = dsc.order
required = False
if default is dsc.REQ:
required = True
default = None
hint = dsc.hint
field = FieldSpec(attr, key, check, default,
order=order, required=required, hint=hint)
fields.add(field)
# Store field metadata and generate the class.
fields = sorted(fields, key=(lambda f: f.order))
members['__fields__'] = tuple(fields)
members['__slots__'] = tuple(field.attr for field in fields)
return type.__new__(mcls, name, bases, members)
class Record(object):
"""Base class for test input/output data."""
__metaclass__ = RecordMetaclass
__slots__ = ('__weakref__',)
__owner__ = None # test type which owns the record type
__fields__ = () # list of record fields
@classmethod
def __recognizes__(cls, keys):
"""Checks if the set of keys compatible with the record type."""
# Check if the key set contains all required record fields.
if not any(field.required for field in cls.__fields__):
return False
return all(field.key in keys for field in cls.__fields__
if field.required)
@classmethod
def __load__(cls, mapping):
"""Generates a record from a mapping of field keys and values."""
args = []
for field in cls.__fields__:
if field.key not in mapping:
if field.required:
raise ValueError("missing field %r" % field.key)
arg = field.default
else:
arg = mapping.pop(field.key)
if field.check is not None and not isinstance(arg, field.check):
raise ValueError("invalid field %r: expected %s, got %r"
% (field.key, field.check.__name__, arg))
args.append(arg)
if mapping:
key = sorted(mapping)[0]
raise ValueError("unknown field %r" % key)
return cls(*args)
def __dump__(self):
"""Generates a list of field keys and values."""
mapping = []
for field in self.__fields__:
arg = getattr(self, field.attr)
if arg == field.default and not field.required:
continue
mapping.append((field.key, arg))
return mapping
def __complements__(self, other):
"""
Checks if two records are complementary input and output records for
the same test case.
"""
# Check if the records belong to the same test type and are of
# complementary types (input and output or vice versa).
if not (self.__owner__ is other.__owner__ and
self.__class__ is not other.__class__):
return False
# Find a common mandatory field.
match_field = None
other_attrs = set(field.attr for field in other.__fields__
if field.required)
for field in self.__fields__:
if field.required and field.attr in other_attrs:
match_field = field
break
if match_field is None:
return False
# Check if the field values coincide.
value = getattr(self, match_field.attr)
other_value = getattr(other, match_field.attr)
return (value == other_value)
def __init__(self, *args, **kwds):
# Convert any keywords to positional arguments.
if kwds:
args_tail = []
for field in self.__fields__[len(args):]:
if field.attr not in kwds:
if field.required:
raise TypeError("missing field %r" % field.attr)
else:
args_tail.append(field.default)
else:
args_tail.append(kwds.pop(field.attr))
args = args + tuple(args_tail)
# Complain if there are any keywords left.
if kwds:
attr = sorted(kwds)[0]
if any(field.attr == attr for field in self.__fields__):
raise TypeError("duplicate field %r" % attr)
else:
raise TypeError("unknown field %r" % attr)
# Assign field values.
if len(args) != len(self.__fields__):
raise TypeError("expected %d arguments, got %d"
% (len(self.__fields__), len(args)))
for arg, field in zip(args, self.__fields__):
setattr(self, field.attr, arg)
def __clone__(self, **kwds):
"""Makes a copy with new values for the given fields."""
if not kwds:
return self
args = []
for field in self.__fields__:
if field.attr not in kwds:
arg = getattr(self, field.attr)
else:
arg = kwds.pop(field.attr)
args.append(arg)
if kwds:
attr = sorted(kwds)[0]
raise TypeError("unknown field %r" % attr)
return self.__class__(*args)
def __iter__(self):
# Provided so that ``tuple(self)`` works.
for field in self.__fields__:
yield getattr(self, field.attr)
def __hash__(self):
return hash(tuple(self))
def __eq__(self, other):
return (self.__class__ is other.__class__ and
tuple(self) == tuple(other))
def __ne__(self, other):
return (self.__class__ is not other.__class__ or
tuple(self) != tuple(other))
def __str__(self):
# Generates printable representation from the first mandatory field.
title_field = None
for field in self.__fields__:
if field.required:
title_field = field
break
if title_field is None:
return repr(self)
value = str(getattr(self, title_field.attr))
return "%s: %s" % (title_field.key.upper(), value)
def __repr__(self):
# `<name>(<field>=<value>, ...)`
return ("%s(%s)" %
(self.__class__.__name__,
", ".join("%s=%r" % (field.attr, value)
for field, value in zip(self.__fields__, self)
if value != field.default)))
def Test(cls):
"""Registers a test type."""
assert isinstance(cls, types.TypeType), "a test type must be a class"
# Convert `Input` and `Output` definitions to `Record` subclasses.
for name in ['Input', 'Output']:
record_bases = [Record]
for base in reversed(cls.__mro__):
if name not in base.__dict__:
continue
record_def = base.__dict__[name]
if isinstance(record_def, type) and issubclass(record_def, Record):
record_bases.insert(0, record_def)
continue
record_name = "%s.%s" % (base.__name__, name)
record_members = record_def.__dict__.copy()
record_members['__owner__'] = base
record_type = type(record_name, tuple(record_bases), record_members)
setattr(base, name, record_type)
record_bases.insert(0, record_type)
# Register test and record types.
registry.case_types.append(cls)
if 'Input' in cls.__dict__:
registry.input_types.append(cls.Input)
if 'Output' in cls.__dict__:
registry.output_types.append(cls.Output)
return cls
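# Hedged sketch (illustrative, not part of the original module): registering a test
# type with the decorator above. The SQLCase name and its fields are invented for
# illustration; only Test, Field and the generated __load__ come from this module.
@Test
class SQLCase(object):
    class Input:
        sql = Field(str, hint="SQL statement to execute")
        autocommit = Field(bool, default=False, hint="commit after execution")
    class Output:
        sql = Field(str)
        rows = Field(list, default=None, hint="rows returned by the statement")

_example_input = SQLCase.Input.__load__({'sql': "SELECT 1"})
assert _example_input.sql == "SELECT 1" and _example_input.autocommit is False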
|
StarcoderdataPython
|
11216372
|
<reponame>curenamo/ssmhub
"""this preset uses a url-type string to implement 12-factor configuration with fewer environment variables.
DATABASE_URL = '<engine>://<user>:<password>@<host>:<port>/<database>'
<engine> can be one of: 'postgresql', 'postgis', 'mysql', 'sqlite'
(default: sqlite3 database file)
DJANGO_CONFIGURATION = 'Dev' # use testing environment
DJANGO_SECRET_KEY = '<key of your invention>'
"""
import os
configuration = os.getenv('DJANGO_CONFIGURATION', 'Dev')
if configuration == 'Dev':
from staging_example import * # get most settings from staging_example.py (which in turn, imports from settings.py)
else:
from production_example import *
url = os.getenv('DATABASE_URL')
if url:
import dj_database_url
    DATABASES = {'default': dj_database_url.parse(url)}  # parse() takes the URL string itself; config() expects an environment variable name
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
# Make a unique key (with a default for testing), and don't share it with anybody.
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', '<KEY>')
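# Hedged usage sketch (illustrative values, not part of the original settings): the
# environment variables below would select the non-Dev settings branch and a
# Postgres database via DATABASE_URL.
#
#   export DJANGO_CONFIGURATION=Production
#   export DJANGO_SECRET_KEY=change-me
#   export DATABASE_URL=postgresql://app_user:app_pass@localhost:5432/appdb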
|
StarcoderdataPython
|
4895359
|
"""
Run the KGTK-Browser Flask server
Open a browser window with the kgtk-browser location
Optional params:
- hostname (--host)
- port number (-p, --port)
- kgtk browser config file (-c, --config)
- kgtk browser flask app file (-a, --app)
Example usage:
kgtk browser --host 0.0.0.0 --port 1234 --app flask_app.py --config config.py
"""
from argparse import Namespace, SUPPRESS
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
# Define the name of the command and its alias.
BROWSER_COMMAND: str = "browser"
BROWSE_COMMAND: str = "browse"
def parser():
return {
'aliases': [ BROWSE_COMMAND ],
'help': 'Run the KGTK-Browser Flask app.',
'description': 'Open a new browser with the KGTK-Browser app running.',
}
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
from kgtk.utils.argparsehelpers import optional_bool
    # These special shared arguments indicate whether the `--expert` option
# was supplied and the command name that was used.
_expert: bool = parsed_shared_args._expert
_command: str = parsed_shared_args._command
    # This helper function makes it easy to suppress options from
    # the help message. The options are still there, and initialize
    # what they need to initialize.
def h(msg: str)->str:
if _expert:
return msg
else:
return SUPPRESS
# KGTK Browser hostname
parser.add_argument(
'--host',
dest="kgtk_browser_host",
help="Hostname used to launch flask server, defaults to localhost",
default="localhost",
)
# KGTK Browser port number
parser.add_argument(
'-p', '--port',
dest="kgtk_browser_port",
help="Port number used to launch flask server, defaults to 5000",
default="5000",
)
# KGTK Browser configuration file
parser.add_argument(
'-c', '--config',
dest="kgtk_browser_config",
help="KGTK Browser configuration file, defaults to `kgtk_browser_config.py`",
default="kgtk_browser_config.py",
)
# KGTK Browser application file
parser.add_argument(
'-a', '--app',
dest="kgtk_browser_app",
help="KGTK Browser flask application file, defaults to `kgtk_browser_app.py`",
default="kgtk_browser_app.py",
)
def run(
kgtk_browser_host: str = 'localhost',
kgtk_browser_port: str = '5000',
kgtk_browser_config: str = 'kgtk_browser_config.py',
kgtk_browser_app: str = 'kgtk_browser_app.py',
errors_to_stdout: bool = False,
errors_to_stderr: bool = True,
show_options: bool = False,
verbose: bool = False,
very_verbose: bool = False,
**kwargs # Whatever KgtkFileOptions and KgtkValueOptions want.
)->int:
# import modules locally
from pathlib import Path
import simplejson as json
import webbrowser
import threading
import os, sys
import typing
from kgtk.exceptions import KGTKException
# Select where to send error messages, defaulting to stderr.
error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr
# Show the final option structures for debugging and documentation.
if show_options:
print("--input-file=%s" % repr(str(input_file_path)), file=error_file, flush=True)
print("--output-file=%s" % repr(str(output_file_path)), file=error_file, flush=True)
idbuilder_options.show(out=error_file)
print("=======", file=error_file, flush=True)
try:
# Set the flask app and configuration file settings
os.environ["FLASK_APP"] = kgtk_browser_app
os.environ["KGTK_BROWSER_CONFIG"] = kgtk_browser_config
# Open the default web browser at the kgtk-browser location
url = "http://{}:{}/browser".format(kgtk_browser_host, kgtk_browser_port)
threading.Timer(2.5, lambda: webbrowser.open(url)).start()
# Run flask app using the selected host and port
os.system(
"flask run --host {} --port {}".format(
kgtk_browser_host,
kgtk_browser_port,
)
)
return 0
except SystemExit as e:
raise KGTKException("Exit requested")
except Exception as e:
raise KGTKException(str(e))
|
StarcoderdataPython
|
208020
|
from typing import Tuple
import pytest
from core.emulator.data import IpPrefixes, LinkOptions
from core.emulator.session import Session
from core.errors import CoreError
from core.nodes.base import CoreNode
from core.nodes.network import SwitchNode
INVALID_ID: int = 100
LINK_OPTIONS: LinkOptions = LinkOptions(
delay=50, bandwidth=5000000, loss=25, dup=25, jitter=10, buffer=100
)
def create_ptp_network(
session: Session, ip_prefixes: IpPrefixes
) -> Tuple[CoreNode, CoreNode]:
# create nodes
node1 = session.add_node(CoreNode)
node2 = session.add_node(CoreNode)
# link nodes to net node
iface1_data = ip_prefixes.create_iface(node1)
iface2_data = ip_prefixes.create_iface(node2)
session.add_link(node1.id, node2.id, iface1_data, iface2_data)
# instantiate session
session.instantiate()
return node1, node2
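# Hedged sketch (illustrative, not part of the original test module): a minimal test
# exercising the helper above; `session` and `ip_prefixes` are the same pytest
# fixtures used by TestLinks below.
def test_create_ptp_network_example(session: Session, ip_prefixes: IpPrefixes):
    node1, node2 = create_ptp_network(session, ip_prefixes)
    # after instantiation both nodes should have at least one interface
    assert node1.ifaces
    assert node2.ifaces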
class TestLinks:
def test_add_node_to_node(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(CoreNode)
iface1_data = ip_prefixes.create_iface(node1)
iface2_data = ip_prefixes.create_iface(node2)
# when
iface1, iface2 = session.add_link(
node1.id, node2.id, iface1_data, iface2_data, options=LINK_OPTIONS
)
# then
assert node1.get_iface(iface1_data.id)
assert node2.get_iface(iface2_data.id)
assert iface1 is not None
assert iface2 is not None
assert iface1.local_options == LINK_OPTIONS
assert iface1.has_local_netem
assert iface2.local_options == LINK_OPTIONS
assert iface2.has_local_netem
def test_add_node_to_net(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(SwitchNode)
iface1_data = ip_prefixes.create_iface(node1)
# when
iface, _ = session.add_link(
node1.id, node2.id, iface1_data=iface1_data, options=LINK_OPTIONS
)
# then
assert node2.links()
assert node1.get_iface(iface1_data.id)
assert iface is not None
assert iface.local_options == LINK_OPTIONS
assert iface.has_local_netem
def test_add_net_to_node(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(CoreNode)
iface2_data = ip_prefixes.create_iface(node2)
# when
_, iface = session.add_link(
node1.id, node2.id, iface2_data=iface2_data, options=LINK_OPTIONS
)
# then
assert node1.links()
assert node2.get_iface(iface2_data.id)
assert iface is not None
assert iface.local_options == LINK_OPTIONS
assert iface.has_local_netem
def test_add_net_to_net(self, session):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(SwitchNode)
# when
iface, _ = session.add_link(node1.id, node2.id, options=LINK_OPTIONS)
# then
assert node1.links()
assert iface is not None
assert iface.local_options == LINK_OPTIONS
assert iface.options == LINK_OPTIONS
assert iface.has_local_netem
assert iface.has_netem
def test_add_node_to_node_uni(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(CoreNode)
iface1_data = ip_prefixes.create_iface(node1)
iface2_data = ip_prefixes.create_iface(node2)
link_options1 = LinkOptions(
delay=50,
bandwidth=5000000,
loss=25,
dup=25,
jitter=10,
buffer=100,
unidirectional=True,
)
link_options2 = LinkOptions(
delay=51,
bandwidth=5000001,
loss=26,
dup=26,
jitter=11,
buffer=101,
unidirectional=True,
)
# when
iface1, iface2 = session.add_link(
node1.id, node2.id, iface1_data, iface2_data, link_options1
)
session.update_link(
node2.id, node1.id, iface2_data.id, iface1_data.id, link_options2
)
# then
assert node1.get_iface(iface1_data.id)
assert node2.get_iface(iface2_data.id)
assert iface1 is not None
assert iface2 is not None
assert iface1.local_options == link_options1
assert iface1.has_local_netem
assert iface2.local_options == link_options2
assert iface2.has_local_netem
def test_update_node_to_net(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(SwitchNode)
iface1_data = ip_prefixes.create_iface(node1)
iface1, _ = session.add_link(node1.id, node2.id, iface1_data)
assert iface1.local_options != LINK_OPTIONS
# when
session.update_link(
node1.id, node2.id, iface1_id=iface1_data.id, options=LINK_OPTIONS
)
# then
assert iface1.local_options == LINK_OPTIONS
assert iface1.has_local_netem
def test_update_net_to_node(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(CoreNode)
iface2_data = ip_prefixes.create_iface(node2)
_, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data)
assert iface2.local_options != LINK_OPTIONS
# when
session.update_link(
node1.id, node2.id, iface2_id=iface2_data.id, options=LINK_OPTIONS
)
# then
assert iface2.local_options == LINK_OPTIONS
assert iface2.has_local_netem
def test_update_ptp(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(CoreNode)
iface1_data = ip_prefixes.create_iface(node1)
iface2_data = ip_prefixes.create_iface(node2)
iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data)
assert iface1.local_options != LINK_OPTIONS
assert iface2.local_options != LINK_OPTIONS
# when
session.update_link(
node1.id, node2.id, iface1_data.id, iface2_data.id, LINK_OPTIONS
)
# then
assert iface1.local_options == LINK_OPTIONS
assert iface1.has_local_netem
assert iface2.local_options == LINK_OPTIONS
assert iface2.has_local_netem
def test_update_net_to_net(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(SwitchNode)
iface1, _ = session.add_link(node1.id, node2.id)
assert iface1.local_options != LINK_OPTIONS
# when
session.update_link(node1.id, node2.id, options=LINK_OPTIONS)
# then
assert iface1.local_options == LINK_OPTIONS
assert iface1.has_local_netem
assert iface1.options == LINK_OPTIONS
assert iface1.has_netem
def test_clear_net_to_net(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(SwitchNode)
iface1, _ = session.add_link(node1.id, node2.id, options=LINK_OPTIONS)
assert iface1.local_options == LINK_OPTIONS
assert iface1.has_local_netem
assert iface1.options == LINK_OPTIONS
assert iface1.has_netem
# when
options = LinkOptions(delay=0, bandwidth=0, loss=0.0, dup=0, jitter=0, buffer=0)
session.update_link(node1.id, node2.id, options=options)
# then
assert iface1.local_options.is_clear()
assert not iface1.has_local_netem
assert iface1.options.is_clear()
assert not iface1.has_netem
def test_delete_node_to_node(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(CoreNode)
iface1_data = ip_prefixes.create_iface(node1)
iface2_data = ip_prefixes.create_iface(node2)
session.add_link(node1.id, node2.id, iface1_data, iface2_data)
assert node1.get_iface(iface1_data.id)
assert node2.get_iface(iface2_data.id)
# when
session.delete_link(node1.id, node2.id, iface1_data.id, iface2_data.id)
# then
assert iface1_data.id not in node1.ifaces
assert iface2_data.id not in node2.ifaces
def test_delete_node_to_net(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(SwitchNode)
iface1_data = ip_prefixes.create_iface(node1)
session.add_link(node1.id, node2.id, iface1_data)
assert node1.get_iface(iface1_data.id)
# when
session.delete_link(node1.id, node2.id, iface1_id=iface1_data.id)
# then
assert iface1_data.id not in node1.ifaces
def test_delete_net_to_node(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(CoreNode)
iface2_data = ip_prefixes.create_iface(node2)
session.add_link(node1.id, node2.id, iface2_data=iface2_data)
assert node2.get_iface(iface2_data.id)
# when
session.delete_link(node1.id, node2.id, iface2_id=iface2_data.id)
# then
assert iface2_data.id not in node2.ifaces
def test_delete_net_to_net(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(SwitchNode)
session.add_link(node1.id, node2.id)
assert node1.get_linked_iface(node2)
# when
session.delete_link(node1.id, node2.id)
# then
assert not node1.get_linked_iface(node2)
def test_delete_node_error(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(SwitchNode)
session.add_link(node1.id, node2.id)
assert node1.get_linked_iface(node2)
# when
with pytest.raises(CoreError):
session.delete_link(node1.id, INVALID_ID)
with pytest.raises(CoreError):
session.delete_link(INVALID_ID, node2.id)
def test_delete_net_to_net_error(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(SwitchNode)
node3 = session.add_node(SwitchNode)
session.add_link(node1.id, node2.id)
assert node1.get_linked_iface(node2)
# when
with pytest.raises(CoreError):
session.delete_link(node1.id, node3.id)
def test_delete_node_to_net_error(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(SwitchNode)
node3 = session.add_node(SwitchNode)
iface1_data = ip_prefixes.create_iface(node1)
iface1, _ = session.add_link(node1.id, node2.id, iface1_data)
assert iface1
# when
with pytest.raises(CoreError):
session.delete_link(node1.id, node3.id)
def test_delete_net_to_node_error(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(SwitchNode)
node2 = session.add_node(CoreNode)
node3 = session.add_node(SwitchNode)
iface2_data = ip_prefixes.create_iface(node2)
_, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data)
assert iface2
# when
with pytest.raises(CoreError):
session.delete_link(node1.id, node3.id)
def test_delete_node_to_node_error(self, session: Session, ip_prefixes: IpPrefixes):
# given
node1 = session.add_node(CoreNode)
node2 = session.add_node(CoreNode)
node3 = session.add_node(SwitchNode)
iface1_data = ip_prefixes.create_iface(node1)
iface2_data = ip_prefixes.create_iface(node2)
iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data)
assert iface1
assert iface2
# when
with pytest.raises(CoreError):
session.delete_link(node1.id, node3.id)
|
StarcoderdataPython
|
3408675
|
<reponame>UniversitaDellaCalabria/datiFontiSparse<gh_stars>0
from django.contrib import admin
from template.admin import AbstractCreatedModifiedBy
from .admin_inline import *
from .models import *
@admin.register(Visiting)
class VisitingAdmin(AbstractCreatedModifiedBy):
list_display = ('visitor', 'from_structure', 'to_structure', 'role')
search_fields = ('visitor__last_name',)
list_filter = ('from_structure', 'to_structure', 'role__role_type')
inlines = (VisitingCollaborationAdminInline,)
@admin.register(Role)
class RoleAdmin(admin.ModelAdmin):
list_display = ('role_type',)
@admin.register(Collaboration)
class CollaborationAdmin(admin.ModelAdmin):
list_display = ('collab_type',)
|
StarcoderdataPython
|
372416
|
<filename>chp8/trajectory_sampling.py
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 4 10:44:13 2021
@author: leyuan
"""
import time
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from tqdm import tqdm
# 2 actions
ACTIONS = [0, 1]
# each transition terminates with probability TERMINATION_PROB, in which case the reward is 0
TERMINATION_PROB = 0.1
# maximum expected updates
MAX_STEPS = 20000
# epsilon greedy for behavior policy
EPSILON = 0.1
# break tie randomly
def argmax(value):
return np.random.choice(np.where(value == np.max(value))[0])
class Task(object):
def __init__(self, n_states, b):
'''
@n_states: num of non-terminal state
@b: # of branches
each episode starts at state 0, and terminates at state n_states
'''
self.n_states = n_states
self.b = b
# transition matrix, each state-action pair leads to b possible states with equal prob
# and with a different random selection of b states for each state–action pair.
self.transition = np.random.randint(n_states, size=(n_states, len(ACTIONS), b))
# self.transition = np.zeros((n_states, len(ACTIONS), b))
# for i in range(n_states):
# for j in range(len(ACTIONS)):
        #         self.transition[i, j] = np.random.choice(n_states, b, replace=False)
self.reward = np.random.randn(n_states, len(ACTIONS), b)
def step(self, state, action):
if np.random.rand() < TERMINATION_PROB:
return self.n_states, 0
idx = np.random.choice(self.b)
return self.transition[state, action, idx], self.reward[state, action, idx]
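# Quick illustration (not part of the original experiment) of how a Task is sampled;
# the transition and reward tables are random, so exact values differ per run:
#
#   task = Task(n_states=5, b=3)
#   next_state, reward = task.step(state=0, action=0)
#   # next_state is one of the 3 successors of (0, 0), or 5 (terminal) with prob 0.1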
# Evaluate the value of the start state for the greedy policy
def evaluate_pi(q_value, task):
# use MC method to estimate the state value
runs = 1000
returns = []
    for _ in range(runs):
reward = 0
state = 0
while state < task.n_states:
action = argmax(q_value[state])
state, r = task.step(state, action)
reward += r
returns.append(reward)
return np.mean(returns)
def uniform(task, eval_interval):
'''
perform expected update from a uniform state-action distribution of the MDP @task
evaluate the learned q value every @eval_interval steps
'''
performance = []
q_value = np.zeros((task.n_states, len(ACTIONS)))
for step in tqdm(range(MAX_STEPS)):
        '''
        Because the state-action pairs are drawn from a uniform distribution, each pair is
        updated on average MAX_STEPS / (n_states * 2) times over MAX_STEPS expected updates.
        We could sample a random state and a random action at every step, but that is slower;
        the reference code uses a neat approximation and simply sweeps through all pairs in
        order, over and over.
        '''
state = step // len(ACTIONS) % task.n_states
action = step % len(ACTIONS)
next_states = task.transition[state, action]
q_value[state, action] = (1 - TERMINATION_PROB) * np.mean(task.reward[state, action] + np.max(q_value[next_states, :], axis=1))
if step % eval_interval == 0:
v_pi = evaluate_pi(q_value, task)
performance.append([step, v_pi])
return zip(*performance)
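# Worked example of the sweep above (illustrative, not from the original code):
# with n_states = 3 and len(ACTIONS) = 2, steps 0..5 map to
#   step 0 -> (state 0, action 0), step 1 -> (state 0, action 1),
#   step 2 -> (state 1, action 0), step 3 -> (state 1, action 1),
#   step 4 -> (state 2, action 0), step 5 -> (state 2, action 1),
# and step 6 wraps back to (state 0, action 0), so every pair is visited
# round-robin roughly MAX_STEPS / (n_states * 2) times.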
def on_policy(task, eval_interval):
performance = []
q_value = np.zeros((task.n_states, len(ACTIONS)))
state = 0 # every episode starts at state 0
for step in tqdm(range(MAX_STEPS)):
if np.random.rand() < EPSILON:
action = np.random.choice(ACTIONS)
else:
action = argmax(q_value[state])
        next_state, _ = task.step(state, action)  # expected updates are used, so the sampled reward is not needed
next_states = task.transition[state, action]
q_value[state, action] = (1 - TERMINATION_PROB) * np.mean(task.reward[state, action] + np.max(q_value[next_states, :], axis=1))
        '''
        This update expression is the same as in uniform(), since both perform expected
        updates; what differs is which q(s, a) gets updated: here it follows the on-policy
        distribution, whereas uniform() follows the uniform distribution.
        Checking below whether next_state equals the terminal state is another difference:
        because this is trajectory sampling (via the step function), the terminal state can
        actually be reached.
        '''
if next_state == task.n_states:
next_state = 0
state = next_state
if step % eval_interval == 0:
v_pi = evaluate_pi(q_value, task)
performance.append([step, v_pi])
return zip(*performance)
def figure_8_8():
num_states = [1000, 10000]
branch = [1, 3, 10]
methods =[on_policy, uniform]
# average over 30 tasks
n_tasks = 30
# num of evaluation points
x_ticks = 100
plt.figure(figsize=(10, 20))
for i, n in enumerate(num_states):
plt.subplot(2, 1, i+1)
for b in branch:
tasks = [Task(n, b) for _ in range(n_tasks)]
for method in methods:
steps = None
value = []
for task in tasks:
                    steps, v = method(task, MAX_STEPS // x_ticks)
value.append(v)
value = np.mean(np.asarray(value), axis=0)
plt.plot(steps, value, label=f'branch = {b}, {method.__name__}')
plt.title(f'{n} states')
plt.xlabel('computation time, in expected updates')
plt.ylabel('value of start state')
plt.legend()
figure_8_8()
|
StarcoderdataPython
|
3533284
|
# Generated by Django 3.2 on 2021-04-13 15:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('chat_bots', '0008_alter_bottype_id'),
('chat_bots', '0009_auto_20210413_1823'),
]
operations = [
]
|
StarcoderdataPython
|
8136499
|
from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from problem.models import Response, Problem
from problem.serializers import ResponseSerializer, ProblemSerializer
class BaseProblemAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, \
mixins.CreateModelMixin):
"""Base viewset for problem attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
class ResponseViewSet(BaseProblemAttrViewSet):
"""Manage responses in the database"""
queryset = Response.objects.all()
serializer_class = ResponseSerializer
class ProblemViewSet(BaseProblemAttrViewSet):
"""Manage problems in the database"""
queryset = Problem.objects.all()
serializer_class = ProblemSerializer
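# A minimal sketch (not part of this app) of how these viewsets might be wired up
# with a DRF router; the module path and URL prefixes below are assumptions, not
# taken from the project:
#
#   # e.g. in a hypothetical problem/urls.py
#   from rest_framework.routers import DefaultRouter
#
#   router = DefaultRouter()
#   router.register("responses", ResponseViewSet)
#   router.register("problems", ProblemViewSet)
#   urlpatterns = router.urls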
|
StarcoderdataPython
|
12857068
|
<reponame>LordGhostX/ECXBotsMastery
import requests
from bs4 import BeautifulSoup
def find_word_meaning(word):
r = requests.get(f"https://www.dictionary.com/browse/{word}")
if r.status_code == 200:
page = BeautifulSoup(r.text, "html.parser")
luna_pos = page.find("span", {"class": "luna-pos"}).text
word_meaning = f"{word} - {luna_pos}\n\n"
meanings = page.find(
"div", {"class": "css-1uqerbd e1hk9ate0"}).find_all("div", {"class": "e1q3nk1v2"})
for i, meaning in enumerate(meanings):
word_meaning += f"{i + 1} - {meaning.find('span').text}\n\n"
return word_meaning.strip()
elif r.status_code == 404:
return "the specified word does not exist!"
else:
return "an error occured while finding word meaning!"
if __name__ == "__main__":
print(find_word_meaning("intense"))
|
StarcoderdataPython
|
3391897
|
# -*- coding: utf-8 -*-
"""
File Name: largest-perimeter-triangle.py
Author : jynnezhang
    Date: 2020/11/29 12:55 PM
Description:
https://leetcode-cn.com/problems/largest-perimeter-triangle/
"""
class Solution:
    def largestPerimeter(self, A=None) -> int:
if not A or len(A) < 3:
return 0
A.sort(reverse=True)
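        # After sorting in descending order it is enough to check consecutive triples:
        # for A[i-2] >= A[i-1] >= A[i] the triangle inequality reduces to
        # A[i-1] + A[i] > A[i-2], and the first triple that satisfies it yields the
        # largest possible perimeter.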
i = 2
while i < len(A):
a1, a2, a3 = A[i-2], A[i-1], A[i]
if a2 + a3 > a1:
return a1 + a2 + a3
else:
i += 1
return 0
if __name__ == '__main__':
print(Solution().largestPerimeter([3,6,2,3]))
|
StarcoderdataPython
|
8091134
|
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import typed_python._types as _types
import unittest
from flaky import flaky
from typed_python import (
Tuple, NamedTuple, Class, Member, ListOf, Compiled,
Final, Forward, OneOf
)
from typed_python import Entrypoint, makeNamedTuple, Function
class TestTupleCompilation(unittest.TestCase):
def test_tuple_passing(self):
T = Tuple(float, int, str)
@Compiled
def f(x: T) -> T:
y = x
return y
t = T((0.0, 1, "hi"))
self.assertEqual(f(t), t)
def test_named_tuple_passing(self):
NT = NamedTuple(a=float, b=int, c=str)
@Compiled
def f(x: NT) -> NT:
y = x
return y
nt = NT(a=0.0, b=1, c="hi")
self.assertEqual(f(nt), nt)
def test_named_tuple_getattr(self):
NT = NamedTuple(a=float, b=int, c=str)
@Compiled
def f(x: NT) -> str:
return x.c + x.c
nt = NT(a=0.0, b=1, c="hi")
self.assertEqual(f(nt), "hihi")
def test_named_tuple_assignment_refcounting(self):
class C(Class):
x = Member(int)
NT = NamedTuple(c=C)
@Compiled
def f(x: NT):
y = x
return y.c
c = C(x=20)
res = f(NT(c=c))
self.assertEqual(res.x, 20)
self.assertEqual(_types.refcount(res), 2)
def test_indexing(self):
T = Tuple(int, str)
@Entrypoint
def getFirst(t):
return t[0]
@Entrypoint
def getSecond(t):
return t[1]
@Entrypoint
def getIx(t, i):
return t[i]
self.assertEqual(getFirst(T((1, '2'))), 1)
self.assertEqual(getSecond(T((1, '2'))), '2')
self.assertEqual(getIx(T((1, '2')), 0), 1)
self.assertEqual(getIx(T((1, '2')), 1), '2')
def test_iterating(self):
@Entrypoint
def tupToString(x):
res = ListOf(str)()
for elt in x:
res.append(str(elt))
return res
self.assertEqual(
tupToString(Tuple(int, str)((0, 'a'))),
["0", "a"]
)
def test_named_tuple_replacing_error(self):
"""We should have errors for all the field names passed to the replacing function,
if the fields are not in the tuple definition.
"""
NT = NamedTuple(a=int, b=str)
n1 = NT(a=1, b='x')
@Compiled
def f1(x: NT) -> NT:
return x.replacing(c=10)
with self.assertRaisesRegex(Exception, "The arguments list contain names 'c' which are not in the tuple definition."):
f1(n1)
@Compiled
def f2(x: NT) -> NT:
return x.replacing(c=10, d=10, e=10)
with self.assertRaisesRegex(Exception, "The arguments list contain names 'c, d, e' which are not in the tuple definition."):
f2(n1)
def test_named_tuple_replacing_function(self):
NT = NamedTuple(a=int, b=str)
n1 = NT(a=1, b='x')
@Compiled
def f1(x: NT, a: int) -> NT:
return x.replacing(a=a)
n2 = f1(n1, 10)
self.assertIsNot(n1, n2)
self.assertEqual(n2.a, 10)
self.assertEqual(n2.b, 'x')
@Compiled
def f2(x: NT, a: int, b: str) -> NT:
return x.replacing(a=a, b=b)
n3 = f2(n2, 123, '345')
self.assertIsNot(n1, n2)
self.assertIsNot(n2, n3)
self.assertEqual(n3.a, 123)
self.assertEqual(n3.b, '345')
def test_named_tuple_replacing_refcount(self):
        N = NamedTuple(x=ListOf(int))
aList = ListOf(int)([1, 2, 3])
self.assertEqual(_types.refcount(aList), 1)
nt = N().replacing(x=aList)
self.assertEqual(nt.x, aList)
self.assertEqual(_types.refcount(aList), 2)
nt = None
self.assertEqual(_types.refcount(aList), 1)
def test_named_tuple_construction(self):
NT = NamedTuple(x=ListOf(int), y=float)
@Entrypoint
def makeNt():
return NT()
@Entrypoint
def makeNtX(x):
return NT(x=x)
@Entrypoint
def makeNtY(y):
return NT(y=y)
@Entrypoint
def makeNtXY(x, y):
return NT(x=x, y=y)
self.assertEqual(makeNt(), NT())
self.assertEqual(makeNtX(ListOf(int)([1, 2, 3])), NT(x=ListOf(int)([1, 2, 3])))
self.assertEqual(makeNtXY(ListOf(int)([1, 2, 3]), 2.0), NT(x=ListOf(int)([1, 2, 3]), y=2.0))
self.assertEqual(makeNtY(2.0), NT(y=2.0))
with self.assertRaisesRegex(TypeError, "Couldn't initialize type ListOf.int. from float"):
makeNtX(1.2)
def test_compile_make_named_tuple(self):
@Entrypoint
def makeNt(x, y):
return makeNamedTuple(x=x, y=y)
self.assertEqual(makeNt(1, 2), makeNamedTuple(x=1, y=2))
self.assertEqual(makeNt(1, "2"), makeNamedTuple(x=1, y="2"))
def test_compiled_tuple_construction(self):
def makeNamed(x, y):
return NamedTuple(x=type(x), y=type(y))((x, y))
def makeUnnamed(x, y):
return Tuple(type(x), type(y))((x, y))
def check(f, x, y):
compiledRes = Entrypoint(f)(x, y)
interpRes = f(x, y)
self.assertEqual(compiledRes, interpRes)
self.assertEqual(type(compiledRes), type(interpRes))
check(makeNamed, 1, 2)
check(makeNamed, 1, "2")
check(makeUnnamed, 1, 2)
check(makeUnnamed, 1, "2")
def test_compare_tuples(self):
ClassWithCompare = Forward("ClassWithCompare")
@ClassWithCompare.define
class ClassWithCompare(Class, Final):
x = Member(int)
y = Member(int)
def __eq__(self, other: ClassWithCompare):
return self.x == other.x and self.y == other.y
def __lt__(self, other: ClassWithCompare):
if self.x < other.x:
return True
                if self.x > other.x:
                    return False
return self.y < other.y
aTuple1 = Tuple(int, int, int)((1, 2, 3))
aTuple2 = Tuple(int, int, int)((1, 2, 4))
aTuple3 = Tuple(int, ClassWithCompare)((1, ClassWithCompare(x=2, y=3)))
aTuple4 = Tuple(int, ClassWithCompare)((1, ClassWithCompare(x=2, y=4)))
aTuple5 = NamedTuple(x=int, y=int, z=int)((1, 2, 3))
aTuple6 = NamedTuple(x=int, y=int, z=int)((1, 2, 4))
aTuple7 = NamedTuple(x=int, y=ClassWithCompare)((1, ClassWithCompare(x=2, y=3)))
aTuple8 = NamedTuple(x=int, y=ClassWithCompare)((1, ClassWithCompare(x=2, y=4)))
def check(l, expected=None):
if expected is not None:
self.assertEqual(l(), expected)
self.assertEqual(l(), Entrypoint(l)())
for tup1, tup2 in [
(aTuple1, aTuple2), (aTuple3, aTuple4),
(aTuple5, aTuple6), (aTuple7, aTuple8)
]:
check(lambda: tup1 == tup1, True)
check(lambda: tup2 == tup2, True)
check(lambda: tup1 != tup2, True)
check(lambda: tup2 != tup1, True)
someTuples = [
Tuple(int, int)((1, 2)),
Tuple(int, int)((1, 3)),
Tuple(int, int)((2, 2)),
Tuple(int, int)((2, 3)),
Tuple(int, int, int)((2, 3, 4)),
Tuple(int, int, int)((1, 2, 3)),
NamedTuple(x=int, y=int, z=int)((2, 3, 4)),
NamedTuple(x=int, y=int, z=int)((1, 2, 3)),
]
for t1 in someTuples:
for t2 in someTuples:
check(lambda: t1 < t2)
check(lambda: t1 > t2)
check(lambda: t1 <= t2)
check(lambda: t1 >= t2)
check(lambda: t1 == t2)
check(lambda: t1 != t2)
def test_subclass_of_named_tuple_compilation(self):
NT = NamedTuple(a=int, b=str)
class X(NT):
def aMethod(self, x):
return self.a + x
@property
def aProperty(self):
return self.a + 100
@staticmethod
def aStaticMethod(x):
return x
@Function
def aMethodWithType(self, x) -> float:
return self.a + x
assert X().aProperty == 100
@Entrypoint
def makeAnX(a, b):
return X(a=a, b=b)
assert makeAnX(a=1, b="hi").a == 1
assert makeAnX(a=1, b="hi").b == 'hi'
@Entrypoint
def callAnXMethod(x: X, y):
return x.aMethod(y)
assert callAnXMethod(X(a=1), 10) == 11
@Entrypoint
def callAnXMethodWithType(x: X, y):
return x.aMethodWithType(y)
assert callAnXMethod(X(a=1), 10.5) == 11.5
@Entrypoint
def callAStaticMethodOnInstance(x: X, y):
return x.aStaticMethod(y)
        assert callAStaticMethodOnInstance(X(a=1), 12) == 12
@Entrypoint
def callAStaticMethodOnClass(y):
return X.aStaticMethod(y)
assert callAStaticMethodOnClass(13) == 13
@Entrypoint
def getAProperty(x: X):
return x.aProperty
assert getAProperty(X(a=12)) == 112
@flaky(max_runs=3, min_passes=1)
def test_subclass_of_named_tuple_compilation_perf(self):
NT = NamedTuple(a=float, b=str)
class X(NT):
def aMethod(self, x):
return self.a + x
@Entrypoint
def loop(self, x, times):
res = 0.0
for i in range(times):
res += self.aMethod(x)
return res
X(a=123).loop(1.2, 100)
t0 = time.time()
X(a=123).loop(1.2, 1000000)
print(time.time() - t0, " to do 1mm")
# I get about .001 seconds for this.
assert time.time() - t0 < .01
def test_bound_method_on_named_tuple(self):
class NT(NamedTuple(x=str)):
@Function
def f(self, x):
return x + self.x
@Entrypoint
def callIt(n):
method = n.f
return method("asdf")
assert callIt(NT(x="hi")) == "asdfhi"
def test_negative_indexing(self):
@Entrypoint
def sliceAt(tup, ix):
return tup[ix]
@Entrypoint
def sliceAtOne(tup):
return tup[1]
@Entrypoint
def sliceAtMinusOne(tup):
return tup[-1]
class A(Class):
pass
class B(Class):
pass
tup = Tuple(A, B)([A(), B()])
self.assertEqual(sliceAtMinusOne(tup), sliceAtOne(tup))
self.assertEqual(sliceAt(tup, -2), sliceAt(tup, 0))
with self.assertRaises(IndexError):
sliceAt(tup, -3)
self.assertIs(sliceAtMinusOne.resultTypeFor(type(tup)).interpreterTypeRepresentation, B)
self.assertIs(Function(lambda tup: sliceAt(tup, -2)).resultTypeFor(type(tup)).interpreterTypeRepresentation, OneOf(A, B))
def test_construct_named_tuple_with_other_named_tuple(self):
# we should be matching the names up correctly
T1 = NamedTuple(x=int, y=str)
T2 = NamedTuple(y=str, x=int)
T3 = NamedTuple(z=float, y=str, x=int)
@Entrypoint
def firstCheck():
assert T1(T2(T1(x=10, y='hello'))) == T1(x=10, y='hello')
# we can construct a bigger tuple from a smaller one
assert T3(T2(x=10, y='hello')).y == 'hello'
firstCheck()
@Entrypoint
def secondCheck():
T2(T3(x=10, y='hello'))
# but not in reverse
with self.assertRaises(TypeError):
secondCheck()
def test_construct_named_tuple_with_incorrect_number_of_arguments(self):
# we should be matching the names up correctly
NT = NamedTuple(z=float, y=str, x=int)
@Entrypoint
def checkIt():
return NT(z=10, x=20)
assert checkIt().y == ""
def test_cant_construct_named_tuple_with_non_default_initializable(self):
# we should be matching the names up correctly
C = Forward("C")
@C.define
class C(Class):
def __init__(self):
pass
assert not _types.is_default_constructible(C)
NT = NamedTuple(x=float, c=C)
@Entrypoint
def checkIt():
return NT(x=10)
with self.assertRaisesRegex(TypeError, "Can't default initialize member 'c'"):
checkIt()
|
StarcoderdataPython
|
1711788
|
from time import sleep
from typing import List, Union, Dict
from privex.loghelper import LogHelper
from tests.base import PrivexBaseCase
from privex.helpers import thread as modthread, LockConflict, random_str, OrderedDictObject
from privex.helpers.thread import BetterEvent, event_multi_wait_all, event_multi_wait_any, lock_acquire_timeout, SafeLoopThread
from collections import namedtuple
from threading import Event, Lock
import threading
import queue
import logging
LOG_FORMATTER = logging.Formatter('[%(asctime)s]: %(name)-25s -> %(funcName)-35s : %(levelname)-8s:: %(message)s')
_lh = LogHelper(__name__, handler_level=logging.DEBUG, formatter=LOG_FORMATTER)
_lh.add_console_handler()
_lh.copy_logger('privex.helpers.thread')
log = logging.getLogger(__name__)
# release_lock = BetterEvent(name='Global Release Lock event')
shared_lock = threading.Lock()
shared_queue = queue.Queue()
stop_threads = BetterEvent(name='Global stop_threads')
LockCheck = namedtuple('LockCheck', 'thread_id was_locked lock_exception thread_name')
UnlockEvent = namedtuple('UnlockEvent', 'thread_id thread_name')
class LockerThread(SafeLoopThread):
loop_sleep = 0.05
def __init__(self, lock: threading.Lock, timeout=2, fail=True, hold_lock_start=True, **kwargs):
kwargs = dict(kwargs)
# Arguments passed to lock_acquire_timeout
self.lock = lock
self.timeout = timeout
self.fail = fail
# Amount of time to wait between each `release_lock` check after the lock is acquired.
# self.lock_hold_sleep = kwargs.get('lock_hold_sleep', 0.2)
        # When release_lock is cleared, the thread will acquire and hold the lock; setting release_lock tells it to release the lock.
self.release_lock = BetterEvent(name='Release Lock')
if not hold_lock_start:
log.info("hold_lock_start is False. Triggering event self.release_lock (do not hold lock)")
self.release_lock.set()
self.event_change_lock = Lock()
self.pause_if_locked = kwargs.pop('pause_if_locked', True)
super().__init__(stop_events=[stop_threads], **kwargs)
@property
def should_lock(self):
return not self.release_lock.is_set()
def emit_lock(self, event_lock_timeout=None):
with lock_acquire_timeout(self.event_change_lock, event_lock_timeout, fail=True, block=event_lock_timeout is not None):
return self.release_lock.clear()
def emit_unlock(self, event_lock_timeout=None):
with lock_acquire_timeout(self.event_change_lock, event_lock_timeout, fail=True, block=event_lock_timeout is not None):
return self.release_lock.set()
def loop(self):
if not self.should_lock:
log.debug(f" [{self.name}] Waiting for release_lock event to be cleared...")
ev_trig = event_multi_wait_any(self.release_lock, *self.stop_events, invert_idx=[0], wait_timeout=20)
return log.debug(f" [{self.name}] Finished waiting due to events: {ev_trig}")
log.info(f" [{self.name}] Acquiring lock: %s", self.lock)
try:
with modthread.lock_acquire_timeout(self.lock, self.timeout, fail=self.fail) as locked:
if not locked:
log.debug(f" [{self.name}] did not acquire lock. not waiting to hold lock open.")
if self.pause_if_locked:
log.debug(f" [{self.name}] pause_if_locked is True. setting release_lock to pause lock acquisition attempts.")
try:
self.emit_unlock()
except LockConflict:
log.debug(f" [{self.name}] got lock conflict while setting release_lock...")
self.out_queue.put(LockCheck(self.ident, locked, lock_exception=None, thread_name=self.name))
if not locked:
return log.debug(f" [{self.name}] lock not acquired, returning loop...")
log.debug(f" [{self.name}] waiting until release_lock or any event in stop_events is triggered...")
ev_trig = event_multi_wait_any(self.release_lock, *self.stop_events)
log.debug(f" [{self.name}] finished waiting to release lock due to events: {ev_trig}")
if locked:
log.debug(f" [{self.name}] release_lock released or thread stop event fired. releasing previously acquired lock.")
was_locked = bool(locked)
log.debug(f" [{self.name}] finished lock_acquire_timeout context manager block. lock will be released if we held it...")
if was_locked:
self.out_queue.put(UnlockEvent(self.ident, self.name))
except LockConflict as e:
log.debug(f" [{self.name}] Lock conflict / timeout exception was raised: %s %s", type(e), str(e))
self.out_queue.put(LockCheck(self.ident, None, e, thread_name=self.name))
except Exception as e:
log.exception(f" [{self.name}] Exception raised while acquiring lock: %s %s", type(e), str(e))
self.out_queue.put(LockCheck(self.ident, None, e, thread_name=self.name))
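# Minimal illustration (not part of the test suite) of the lock_acquire_timeout
# context manager as it is used above: with fail=False it yields a truthy/falsy
# value, and with fail=True it raises LockConflict when the timeout expires.
#
#   some_lock = threading.Lock()
#   with lock_acquire_timeout(some_lock, 2, fail=False) as got:
#       if got:
#           ...  # lock held here, released when the block exits
#       else:
#           ...  # timed out without acquiring the lock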
ThreadTypes = Union[threading.Thread, LockerThread]
class TestThreading(PrivexBaseCase):
"""Test cases for :mod:`privex.helpers.thread` functions/classes"""
threads: Union[OrderedDictObject, Dict[str, ThreadTypes]] = OrderedDictObject()
def tearDown(self) -> None:
if len(self.threads) > 0:
stop_threads.set()
sleep(0.3)
thread_keys = list(self.threads.keys())
for name in thread_keys:
t = self.threads[name]
if not t.is_alive():
log.debug("Thread '%s' is dead. Removing from thread dict...", name)
del self.threads[name]
continue
log.debug("Thread '%s' is still alive. Joining and waiting for it to shutdown...", name)
if hasattr(t, 'emit_stop'):
log.debug("Thread '%s' has emit_stop method. Calling emit_stop before joining.", name)
t.emit_stop()
t.join(3)
log.debug("Removing stopped thread %s", name)
del self.threads[name]
log.debug("Successfully removed stopped thread %s", name)
# Reset global event thread signals to their default, empty queues, and release any leftover locks.
# if release_lock.is_set(): release_lock.clear()
if shared_lock.locked(): shared_lock.release()
while not shared_queue.empty():
shared_queue.get_nowait()
if stop_threads.is_set(): stop_threads.clear()
@classmethod
def _mk_locker(cls, lock: threading.Lock, timeout=2, fail=True, hold_lock_start=False, name=None, **kwargs) -> LockerThread:
"""
:param threading.Lock lock:
:param int|float timeout:
:param bool fail:
:param bool hold_lock_start:
:param str name:
:param kwargs:
:return:
"""
auto_start = kwargs.pop('auto_start', True)
name = random_str(8) if name is None else name
t = LockerThread(lock, timeout=timeout, fail=fail, hold_lock_start=hold_lock_start, **kwargs)
t.name = name
t.daemon = kwargs.pop('daemon', False)
if auto_start:
t.start()
cls.threads[name] = t
return t
@staticmethod
def _cleanup_lockers(*lockers: LockerThread):
for l in lockers:
l.emit_unlock() # Release any lock they might be holding
l.emit_stop() # Stop the locker thread
if l.is_alive(): # Join the thread if it's alive so that it can shutdown correctly.
l.join(1)
def test_lock_acquire_timeout_basic(self):
# First we test that we can successfully acquire an unlocked lock
t1 = self._mk_locker(shared_lock, timeout=2, fail=False, name="acquire_lock_timeout_t1")
self.assertFalse(shared_lock.locked())
self.assertTrue(t1.emit_lock(), msg="emit_lock should've returned True to acknowledge release_lock flipping")
# Check the LockCheck result from the thread queue
res: LockCheck = t1.out_queue.get(block=True, timeout=2)
self.assertTrue(res.was_locked)
self.assertTrue(shared_lock.locked())
self.assertIsNone(res.lock_exception)
self.assertEqual(res.thread_name, "acquire_lock_timeout_t1")
# Ask t1 to release the lock
t1.emit_unlock()
res: UnlockEvent = t1.out_queue.get(block=True, timeout=2)
self.assertEqual(res.thread_name, 'acquire_lock_timeout_t1')
self.assertFalse(shared_lock.locked())
# Stop t1
t1.emit_stop()
t1.join(1)
def test_lock_acquire_timeout_timed_out(self):
self.assertFalse(shared_lock.locked())
# First we acquire a lock using our first thread
log.debug(" >>> thread 1 acquire")
t1 = self._mk_locker(shared_lock, timeout=4, fail=False, name="timeout_t1")
t1.emit_lock()
# Check the LockCheck result from the thread queue
res: LockCheck = t1.out_queue.get(block=True, timeout=2)
self.assertTrue(res.was_locked)
self.assertTrue(shared_lock.locked()) # Confirm our lock is locked
# Now we try and acquire the lock with a second thread
log.debug(" >>> thread 2 acquire (test lock timeout fail)")
t2 = self._mk_locker(shared_lock, timeout=2, fail=False, name="timeout_t2")
t2.emit_lock()
# Confirm that t2 failed to get the lock
res: LockCheck = t2.out_queue.get(block=True, timeout=4)
self.assertFalse(res.was_locked)
self.assertTrue(shared_lock.locked())
self.assertIsNone(res.lock_exception)
# Now we'll ask t2 to try and get the lock again, wait 200ms and release the lock
log.debug(" >>> thread 2 acquiring (unlocking thread 1)")
t2.emit_lock()
sleep(0.2)
log.debug(" >>> thread 1 unlocking")
t1.emit_unlock()
# If the lock wait timeout was being acknowledged, t2 should now have the lock.
log.debug(" >>> get thread 2 out_queue")
res: LockCheck = t2.out_queue.get(block=True, timeout=4)
self.assertTrue(res.was_locked)
self.assertTrue(shared_lock.locked())
# Now we'll release the lock and confirm the lock is unlocked again
log.debug(" >>> thread 2 unlock")
t2.emit_unlock()
res: UnlockEvent = t2.out_queue.get(block=True, timeout=4)
self.assertEqual(res.thread_name, 'timeout_t2')
self.assertFalse(shared_lock.locked())
log.debug(" >>> cleanup")
# If we got this far - everything is fine :) - stop the threads and cleanup
self._cleanup_lockers(t1, t2)
|
StarcoderdataPython
|
3542289
|
import sys
class Solution:
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
left = 0
right = len(arr) - 1
pos = 0
if x < arr[0]:
return arr[0:k]
elif x > arr[-1]:
return arr[-k:]
else:
while left <= right:
mid = (left + right) / 2
start = int(mid)
end = start + 1 if mid - start > 0.1 else start
if x < arr[start]:
right = start
elif arr[end] < x:
left = end
elif arr[start] < x < arr[end]:
pos = mid
break
elif arr[start] == x:
pos = start
break
elif arr[end] == x:
pos = end
break
return self.expend(arr, pos, x, k)
def expend(self, arr, center, x, width):
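        # Two-pointer expansion (descriptive comment added for clarity): starting from
        # the position(s) closest to x, grow the window outward one element at a time,
        # preferring the left neighbour on ties (diff1 <= diff2), until it contains
        # `width` elements.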
start = int(center)
end = start + 1 if center - start > 0.1 else start
result = []
max_diff = arr[-1] - arr[0]
while len(result) < width:
if start < 0:
diff1 = max_diff + 1
else:
diff1 = x - arr[start]
if end >= len(arr):
diff2 = max_diff + 1
else:
diff2 = arr[end] - x
if start == end:
result = [arr[start]]
start = start - 1
end = end + 1
elif diff1 <= diff2:
result.insert(0, arr[start])
start = start - 1
else:
result.append(arr[end])
end = end + 1
return result
def main(*args):
result = Solution().findClosestElements([0, 0, 1, 2, 3, 3, 4, 7, 7, 8], 3, 3)
# result = Solution().expend([1, 2, 4, 6, 7], 1.5, 3, 3)
print(result)
if __name__ == '__main__':
main(*sys.argv[1:])
|
StarcoderdataPython
|
6445868
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
import sawatabi.constants as constants
from sawatabi.model import LogicalModel
from sawatabi.model.constraint import NHotConstraint
@pytest.fixture
def ising():
return LogicalModel(mtype="ising")
@pytest.fixture
def qubo():
return LogicalModel(mtype="qubo")
################################
# Select
################################
def test_logical_model_select(ising):
x = ising.variables("x", shape=(10, 10))
ising.add_interaction(x[0][0], coefficient=10.0)
ising.add_interaction(x[0, 1], name="my name", coefficient=20.0)
ising.add_interaction((x[0, 0], x[0, 1]), coefficient=30.0, timestamp=1234567890.123, attributes={"foo": "bar", "my attr": "my value"})
# single result
selected = ising.select_interaction("name == 'x[0][0]'")
assert type(selected) is pd.core.frame.DataFrame
assert len(selected) == 1
assert selected["name"].values[0] == "x[0][0]"
assert selected["key"].values[0] == "x[0][0]"
assert id(selected["interacts"].values[0]) == id(x[0][0])
assert selected["coefficient"].values[0] == 10.0
# dict format
selected = ising.select_interaction("name == 'my name'", fmt="dict")
assert type(selected) is dict
assert len(selected) == 1
key = list(selected.keys())[0]
assert selected[key]["name"] == "my name"
assert selected[key]["key"] == "x[0][1]"
assert id(selected[key]["interacts"]) == id(x[0][1])
assert selected[key]["coefficient"] == 20.0
# multiple results
selected = ising.select_interaction("timestamp > 1234567890.000")
assert len(selected) == 3
assert selected["name"].values[0] == "x[0][0]"
assert selected["name"].values[1] == "my name"
assert selected["name"].values[2] == "x[0][0]*x[0][1]"
assert selected["coefficient"].values[0] == 10.0
assert selected["coefficient"].values[1] == 20.0
assert selected["coefficient"].values[2] == 30.0
assert selected["attributes.foo"].values[2] == "bar"
assert selected["attributes.my attr"].values[2] == "my value"
# empty
selected = ising.select_interaction("timestamp < 1234567890.000")
assert len(selected) == 0
# attributes
selected = ising.select_interaction("`attributes.foo` == 'bar'")
assert len(selected) == 1
assert selected["name"].values[0] == "x[0][0]*x[0][1]"
selected = ising.select_interaction("`attributes.my attr` == 'my value'")
assert len(selected) == 1
assert selected["name"].values[0] == "x[0][0]*x[0][1]"
# invalid query
with pytest.raises(pd.core.computation.ops.UndefinedVariableError):
ising.select_interaction("invalid == 'invalid'")
# invalid format
with pytest.raises(ValueError):
ising.select_interaction("name == 'x[0][0]'", fmt="invalid")
def test_logical_model_select_interactions_by_variable(ising):
x = ising.variables("x", shape=(10, 10))
ising.add_interaction(x[0, 0], coefficient=10.0)
ising.add_interaction(x[0, 1], coefficient=20.0)
ising.add_interaction((x[0, 0], x[0, 1]), coefficient=30.0)
selected = ising.select_interactions_by_variable(x[0, 0])
assert type(selected) is np.ndarray
assert len(selected) == 2
assert selected[0] == "x[0][0]"
assert selected[1] == "x[0][0]*x[0][1]"
################################
# Add
################################
def test_logical_model_add(ising):
x = ising.variables("x", shape=(2, 2))
ising.add_interaction(x[0, 0], coefficient=1.0)
assert len(ising._interactions_array["name"]) == 1
assert ising._interactions_array["name"][0] == "x[0][0]"
assert len(ising._interactions_array["key"]) == 1
assert ising._interactions_array["key"][0] == "x[0][0]"
assert len(ising._interactions_array["interacts"]) == 1
assert ising._interactions_array["interacts"][0] == x[0, 0]
assert len(ising._interactions_array["coefficient"]) == 1
assert ising._interactions_array["coefficient"][0] == 1.0
assert len(ising._interactions_array["scale"]) == 1
assert ising._interactions_array["scale"][0] == 1.0
assert ising._interactions_length == 1
selected = ising.select_interaction("name == 'x[0][0]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 1
assert selected["name"].values[0] == "x[0][0]"
assert selected["key"].values[0] == "x[0][0]"
assert selected["interacts"].values[0] == x[0, 0]
assert id(selected["interacts"].values[0]) == id(x[0, 0])
assert ising._interactions[ising._interactions["name"] == "x[0][0]"]["coefficient"].values[0] == 1.0
assert ising._interactions[ising._interactions["name"] == "x[0][0]"]["scale"].values[0] == 1.0
ising.add_interaction(x[0, 1], coefficient=2.0, scale=0.1)
selected = ising.select_interaction("name == 'x[0][1]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 2
assert selected["name"].values[0] == "x[0][1]"
assert ising._interactions[ising._interactions["name"] == "x[0][1]"]["coefficient"].values[0] == 2.0
assert ising._interactions[ising._interactions["name"] == "x[0][1]"]["scale"].values[0] == 0.1
# attributes
ising.add_interaction(x[1, 0], coefficient=3.0, attributes={"foo": "bar"})
selected = ising.select_interaction("name == 'x[1][0]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 3
assert selected["name"].values[0] == "x[1][0]"
assert ising._interactions[ising._interactions["name"] == "x[1][0]"]["coefficient"].values[0] == 3.0
assert ising._interactions[ising._interactions["name"] == "x[1][0]"]["attributes.foo"].values[0] == "bar"
# timestamp
ising.add_interaction((x[0, 0], x[0, 1]), coefficient=-4.0, timestamp=1234567890.123)
selected = ising.select_interaction("name == 'x[0][0]*x[0][1]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 4
assert selected["name"].values[0] == "x[0][0]*x[0][1]"
assert selected["key"].values[0] == ("x[0][0]", "x[0][1]")
assert selected["interacts"].values[0] == (x[0, 0], x[0, 1])
assert id(selected["interacts"].values[0][0]) == id(x[0, 0])
assert id(selected["interacts"].values[0][1]) == id(x[0, 1])
assert ising._interactions[ising._interactions["name"] == "x[0][0]*x[0][1]"]["coefficient"].values[0] == -4.0
assert ising._interactions[ising._interactions["name"] == "x[0][0]*x[0][1]"]["timestamp"].values[0] == 1234567890.123
# Test key order
ising.add_interaction((x[1, 1], x[1, 0]), coefficient=-4.0, timestamp=1234567890.123)
selected = ising.select_interaction("name == 'x[1][0]*x[1][1]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 5
assert selected["name"].values[0] == "x[1][0]*x[1][1]"
assert selected["key"].values[0] == ("x[1][0]", "x[1][1]")
assert selected["interacts"].values[0] == (x[1, 0], x[1, 1])
assert id(selected["interacts"].values[0][0]) == id(x[1, 0])
assert id(selected["interacts"].values[0][1]) == id(x[1, 1])
assert ising._interactions[ising._interactions["name"] == "x[1][0]*x[1][1]"]["coefficient"].values[0] == -4.0
assert ising._interactions[ising._interactions["name"] == "x[1][0]*x[1][1]"]["timestamp"].values[0] == 1234567890.123
def test_logical_model_add_invalid_arguments(ising):
x = ising.variables("x", shape=(3,))
y = ising.variables("y", shape=(2,))
with pytest.raises(ValueError):
ising.add_interaction(target=None)
with pytest.raises(TypeError):
ising.add_interaction()
with pytest.raises(TypeError):
ising.add_interaction("invalid type", coefficient=1.0)
with pytest.raises(TypeError):
ising.add_interaction((x[0], x[1], x[2]), coefficient=1.0)
with pytest.raises(TypeError):
ising.add_interaction(("a", "b"), coefficient=1.0)
with pytest.raises(ValueError):
ising.add_interaction((x[0], x[0]), coefficient=1.0)
# Invalid types
with pytest.raises(TypeError):
ising.add_interaction(x[0], coefficient="invalid type")
with pytest.raises(TypeError):
ising.add_interaction(x[0], scale="invalid type")
with pytest.raises(TypeError):
ising.add_interaction(x[0], attributes="invalid type")
with pytest.raises(TypeError):
ising.add_interaction(x[0], timestamp="invalid type")
# Already added
with pytest.raises(ValueError):
ising.add_interaction(y[0], coefficient=2.0)
ising.add_interaction(y[0], coefficient=2.0)
# Already removed
with pytest.raises(ValueError):
ising.add_interaction(y[1], coefficient=2.0)
ising.remove_interaction(y[1])
ising.add_interaction(y[1], coefficient=2.0)
def test_logical_model_add_duplicate(ising):
x = ising.variables("x", shape=(2,))
ising.add_interaction(x[0], coefficient=1.0)
selected = ising.select_interaction("name == 'x[0]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 1
assert selected["name"].values[0] == "x[0]"
ising.add_interaction(x[0], name="<NAME>", coefficient=1.0)
selected = ising.select_interaction("name == '<NAME>'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 2
assert selected["name"].values[0] == "<NAME>"
def test_logical_model_add_duplicate_invalid(ising):
x = ising.variables("x", shape=(2,))
ising.add_interaction(x[0], coefficient=1.0)
ising.add_interaction(x[1], name="<NAME>", coefficient=1.0)
with pytest.raises(ValueError):
ising.add_interaction(x[0], coefficient=2.0)
with pytest.raises(ValueError):
ising.add_interaction(x[1], name="<NAME>", coefficient=2.0)
################################
# Update
################################
def test_logical_model_update(ising):
x = ising.variables("x", shape=(2,))
# initialize
ising.add_interaction(x[0], coefficient=1.0)
ising.add_interaction((x[0], x[1]), coefficient=2.0)
ising._update_interactions_dataframe_from_arrays() # Update the interactions DataFrame for debug
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == "x[0]"]["coefficient"].values[0] == 1.0
assert ising._interactions[ising._interactions["name"] == "x[0]*x[1]"]["coefficient"].values[0] == 2.0
# update by a variable
ising.update_interaction(x[0], coefficient=10.0)
selected = ising.select_interaction("name == 'x[0]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == "x[0]"]["coefficient"].values[0] == 10.0
assert selected["name"].values[0] == "x[0]"
assert selected["key"].values[0] == "x[0]"
assert selected["interacts"].values[0] == x[0]
assert id(selected["interacts"].values[0]) == id(x[0])
# update by a target
ising.update_interaction(target=x[0], coefficient=100.0)
ising._update_interactions_dataframe_from_arrays() # Update the interactions DataFrame for debug
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == "x[0]"]["coefficient"].values[0] == 100.0
# update by a name
ising.update_interaction(name="x[0]", coefficient=1000.0, scale=2.0, attributes={"foo": "bar"})
ising._update_interactions_dataframe_from_arrays() # Update the interactions DataFrame for debug
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == "x[0]"]["coefficient"].values[0] == 1000.0
assert ising._interactions[ising._interactions["name"] == "x[0]"]["scale"].values[0] == 2.0
assert ising._interactions[ising._interactions["name"] == "x[0]"]["attributes.foo"].values[0] == "bar"
# update by a pair of variables
ising.update_interaction(target=(x[0], x[1]), coefficient=20.0)
selected = ising.select_interaction("name == 'x[0]*x[1]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == "x[0]*x[1]"]["coefficient"].values[0] == 20.0
assert selected["name"].values[0] == "x[0]*x[1]"
assert selected["key"].values[0] == ("x[0]", "x[1]")
assert selected["interacts"].values[0] == (x[0], x[1])
assert id(selected["interacts"].values[0][0]) == id(x[0])
assert id(selected["interacts"].values[0][1]) == id(x[1])
# update by a pair of variables (reversed order)
ising.update_interaction(target=(x[1], x[0]), coefficient=200.0)
selected = ising.select_interaction("name == 'x[0]*x[1]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == "x[0]*x[1]"]["coefficient"].values[0] == 200.0
assert selected["name"].values[0] == "x[0]*x[1]"
assert selected["key"].values[0] == ("x[0]", "x[1]")
assert selected["interacts"].values[0] == (x[0], x[1])
assert id(selected["interacts"].values[0][0]) == id(x[0])
assert id(selected["interacts"].values[0][1]) == id(x[1])
# update by a name
ising.update_interaction(name="x[0]*x[1]", coefficient=2000.0)
ising._update_interactions_dataframe_from_arrays() # Update the interactions DataFrame for debug
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == "x[0]*x[1]"]["coefficient"].values[0] == 2000.0
def test_logical_model_update_by_custom_names(ising):
y = ising.variables("y", shape=(2,))
n1 = "custom interaction 1"
n2 = "custom interaction 2"
n3 = "custom interaction 3"
# initialize
ising.add_interaction(y[0], coefficient=-1.0, name=n1)
ising.add_interaction((y[0], y[1]), coefficient=-2.0, name=n2)
ising._update_interactions_dataframe_from_arrays() # Update the interactions DataFrame for debug
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == n1]["coefficient"].values[0] == -1.0
assert ising._interactions[ising._interactions["name"] == n2]["coefficient"].values[0] == -2.0
ising.update_interaction(name=n1, coefficient=-10.0)
ising._update_interactions_dataframe_from_arrays() # Update the interactions DataFrame for debug
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == n1]["coefficient"].values[0] == -10.0
ising.update_interaction(name=n2, coefficient=-20.0)
ising._update_interactions_dataframe_from_arrays() # Update the interactions DataFrame for debug
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == n2]["coefficient"].values[0] == -20.0
with pytest.raises(KeyError):
ising.update_interaction(name=n3, coefficient=-30.0)
def test_logical_model_update_without_initialize(ising):
# The following operation will be successful with a UserWarning.
x = ising.variables("x", shape=(3,))
with pytest.warns(UserWarning):
ising.update_interaction(x[0], coefficient=11.0)
selected = ising.select_interaction("name == 'x[0]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 1
assert ising._interactions[ising._interactions["name"] == "x[0]"]["coefficient"].values[0] == 11.0
assert ising._interactions[ising._interactions["name"] == "x[0]"]["scale"].values[0] == 1.0
assert selected["name"].values[0] == "x[0]"
assert selected["key"].values[0] == ("x[0]")
assert selected["interacts"].values[0] == (x[0])
with pytest.warns(UserWarning):
ising.update_interaction((x[0], x[1]), coefficient=22.0, scale=33.0)
selected = ising.select_interaction("name == 'x[0]*x[1]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 2
assert ising._interactions[ising._interactions["name"] == "x[0]*x[1]"]["coefficient"].values[0] == 22.0
assert ising._interactions[ising._interactions["name"] == "x[0]*x[1]"]["scale"].values[0] == 33.0
assert selected["name"].values[0] == "x[0]*x[1]"
assert selected["key"].values[0] == ("x[0]", "x[1]")
assert selected["interacts"].values[0] == (x[0], x[1])
def test_logical_model_update_invalid(ising):
x = ising.variables("x", shape=(3,))
ising.add_interaction(x[0], coefficient=1.0)
with pytest.raises(ValueError):
ising.update_interaction()
with pytest.raises(ValueError):
ising.update_interaction(x[0], name="x[0]")
with pytest.raises(KeyError):
ising.update_interaction(name="x[1]", coefficient=1.0)
with pytest.raises(TypeError):
ising.update_interaction("invalid type", coefficient=1.0)
with pytest.raises(TypeError):
ising.update_interaction((x[0], x[1], x[2]), coefficient=1.0)
with pytest.raises(TypeError):
ising.update_interaction(("a", "b"), coefficient=1.0)
with pytest.raises(ValueError):
ising.update_interaction((x[0], x[0]), coefficient=1.0)
# Invalid types
with pytest.raises(TypeError):
ising.update_interaction(x[0], coefficient="invalid type")
with pytest.raises(TypeError):
ising.update_interaction(x[0], scale="invalid type")
with pytest.raises(TypeError):
ising.update_interaction(x[0], attributes="invalid type")
with pytest.raises(TypeError):
ising.update_interaction(x[0], timestamp="invalid type")
# Already removed
with pytest.raises(ValueError):
ising.add_interaction(x[1], coefficient=2.0)
ising.remove_interaction(x[1])
ising.update_interaction(x[1], coefficient=2.0)
################################
# Remove
################################
def test_logical_model_remove_by_target(ising):
x = ising.variables("x", shape=(2,))
# initialize
ising.add_interaction(x[0], coefficient=1.0)
ising.add_interaction(x[1], coefficient=10.0)
assert ising._interactions_length == 2
# remove
ising.remove_interaction(x[0])
selected = ising.select_interaction("key == 'x[0]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 2
assert selected["name"].values[0] == "x[0]"
assert selected["key"].values[0] == "x[0]"
assert selected["coefficient"].values[0] == 1.0
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
ising.remove_interaction(name="x[1]")
selected = ising.select_interaction("key == 'x[1]'") # Side effect: Internal interactions DataFrame is updated
assert len(ising._interactions) == 2
assert selected["name"].values[0] == "x[1]"
assert selected["key"].values[0] == "x[1]"
assert selected["coefficient"].values[0] == 10.0
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
def test_logical_model_remove_by_name(ising):
x = ising.variables("x", shape=(2,))
# initialize
ising.add_interaction(x[0], "<NAME>", coefficient=1.0)
assert ising._interactions_length == 1
# remove
ising.remove_interaction(name="<NAME>")
assert ising._interactions_length == 1
assert ising.select_interaction("name == '<NAME>'")["removed"].values[0]
def test_logical_model_remove_invalid(ising):
x = ising.variables("x", shape=(3,))
ising.add_interaction(x[0], coefficient=1.0)
with pytest.raises(ValueError):
ising.remove_interaction()
with pytest.raises(ValueError):
ising.remove_interaction(x[0], name="x[0]")
with pytest.raises(KeyError):
ising.remove_interaction(x[1])
with pytest.raises(TypeError):
ising.remove_interaction("invalid type")
with pytest.raises(TypeError):
ising.update_interaction((x[0], x[1], x[2]))
with pytest.raises(TypeError):
ising.update_interaction(("a", "b"))
with pytest.raises(ValueError):
ising.update_interaction((x[0], x[0]))
################################
# Delete
################################
def test_logical_model_delete(ising):
x = ising.variables("x", shape=(2, 3, 4))
y = ising.variables("y", shape=(5, 6)) # noqa: F841
ising.add_interaction(x[0, 0, 0], coefficient=1.0)
assert ising._interactions_length == 1
ising.add_interaction((x[0, 0, 0], x[0, 0, 1]), coefficient=2.0)
assert ising._interactions_length == 2
assert ising.get_size() == 2 * 3 * 4 + 5 * 6
assert ising.get_deleted_size() == 0
assert ising.get_all_size() == 2 * 3 * 4 + 5 * 6
    # Set dirty flags for interactions related to the deleted variable
ising.delete_variable(x[0, 0, 0])
assert ising._interactions_length == 2
assert ising.get_size() == 2 * 3 * 4 + 5 * 6 - 1
assert ising.get_deleted_size() == 1
assert ising.get_all_size() == 2 * 3 * 4 + 5 * 6
# Convert physical to resolve dirty
ising.to_physical()
assert len(ising._interactions_array["name"]) == 0
assert ising._interactions_length == 0
assert ising.get_size() == 2 * 3 * 4 + 5 * 6 - 1
assert ising.get_deleted_size() == 1
assert ising.get_all_size() == 2 * 3 * 4 + 5 * 6
def test_logical_model_delete_dealing_with_nhot_constraints_qubo():
model = LogicalModel(mtype="qubo")
x = model.variables("x", shape=(4,))
default_label = "Default N-hot Constraint"
model.add_constraint(NHotConstraint(x, n=1, strength=1.0))
assert len(model.get_constraints()) == 1
assert default_label in model.get_constraints()
assert model.get_constraints_by_label(default_label)._n == 1
assert len(model.get_constraints_by_label(default_label)._variables) == 4
model.delete_variable(x[0])
assert len(model.get_constraints()) == 1
assert default_label in model.get_constraints()
assert model.get_constraints_by_label(default_label)._n == 1
assert len(model.get_constraints_by_label(default_label)._variables) == 3
physical = model.to_physical()
assert physical._raw_interactions[constants.INTERACTION_LINEAR]["x[1]"] == 1.0
assert physical._raw_interactions[constants.INTERACTION_QUADRATIC][("x[1]", "x[2]")] == -2.0
def test_logical_model_delete_invalid_argument(ising):
with pytest.raises(TypeError):
ising.delete_variable()
with pytest.raises(TypeError):
ising.delete_variable("invalid type")
with pytest.raises(ValueError):
ising.delete_variable(target=None)
################################
# Fix
################################
def test_logical_model_fix_ising(ising):
x = ising.variables("x", shape=(6,))
ising.add_interaction(x[0], coefficient=10.0, scale=1.1)
ising.add_interaction(x[1], coefficient=20.0)
ising.add_interaction(x[2], coefficient=30.0)
ising.add_interaction((x[1], x[2]), coefficient=40.0)
ising.add_interaction((x[1], x[2]), name="my interaction", coefficient=45.0)
ising.add_interaction(x[3], coefficient=50.0)
ising.add_interaction(x[4], coefficient=60.0)
ising.add_interaction((x[3], x[4]), coefficient=70.0)
with pytest.raises(ValueError):
ising.fix_variable(target=x[0], value=0)
# Remove a variable that has no 2-body interactions.
ising.fix_variable(x[0], 1)
assert ising.get_fixed_size() == 1
assert "x[0]" in ising.get_fixed_array()
assert ising.get_offset() == -11.0
selected = ising.select_interaction("name == 'x[0]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
# Remove variables that has 2-body interactions.
ising.fix_variable(x[1], -1)
assert ising.get_fixed_size() == 2
assert "x[1]" in ising.get_fixed_array()
assert ising.get_offset() == -11.0 + 20.0
selected = ising.select_interaction("name == 'x[1]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = ising.select_interaction("name == 'x[1]*x[2]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = ising.select_interaction("name == 'my interaction'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = ising.select_interaction("name == 'x[2] (before fixed: x[1]*x[2])'")
assert selected["body"].values[0] == 1
assert selected["key"].values[0] == "x[2]"
assert selected["coefficient"].values[0] == -40.0
assert selected["scale"].values[0] == 1.0
assert selected["dirty"].values[0]
assert not selected["removed"].values[0]
selected = ising.select_interaction("name == 'x[2] (before fixed: my interaction)'")
assert selected["body"].values[0] == 1
assert selected["key"].values[0] == "x[2]"
assert selected["coefficient"].values[0] == -45.0
assert selected["scale"].values[0] == 1.0
assert selected["dirty"].values[0]
assert not selected["removed"].values[0]
ising.fix_variable(x[3], 1)
assert ising.get_fixed_size() == 3
assert "x[3]" in ising.get_fixed_array()
assert ising.get_offset() == -11.0 + 20.0 - 50.0
selected = ising.select_interaction("name == 'x[3]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = ising.select_interaction("name == 'x[3]*x[4]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = ising.select_interaction("name == 'x[4] (before fixed: x[3]*x[4])'")
assert selected["body"].values[0] == 1
assert selected["key"].values[0] == "x[4]"
assert selected["coefficient"].values[0] == 70.0
assert selected["scale"].values[0] == 1.0
assert selected["dirty"].values[0]
assert not selected["removed"].values[0]
    # Fixing a variable with no registered interactions only emits a warning; nothing else changes.
with pytest.warns(UserWarning):
ising.fix_variable(x[5], 1)
def test_logical_model_fix_qubo(qubo):
a = qubo.variables("a", shape=(6,))
qubo.add_interaction(a[0], coefficient=10.0, scale=1.1)
qubo.add_interaction(a[1], coefficient=20.0)
qubo.add_interaction(a[2], coefficient=30.0)
qubo.add_interaction((a[1], a[2]), coefficient=40.0)
qubo.add_interaction((a[1], a[2]), name="my interaction", coefficient=45.0)
qubo.add_interaction(a[3], coefficient=50.0)
qubo.add_interaction(a[4], coefficient=60.0)
qubo.add_interaction((a[3], a[4]), coefficient=70.0)
with pytest.raises(ValueError):
qubo.fix_variable(target=a[0], value=-1)
# Remove a variable that has no 2-body interactions.
qubo.fix_variable(a[0], 1)
assert qubo.get_fixed_size() == 1
assert "a[0]" in qubo.get_fixed_array()
assert qubo.get_offset() == -11.0
selected = qubo.select_interaction("name == 'a[0]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
    # Remove variables that have 2-body interactions.
qubo.fix_variable(a[2], 1)
assert qubo.get_fixed_size() == 2
assert "a[2]" in qubo.get_fixed_array()
assert qubo.get_offset() == -11.0 - 30.0
selected = qubo.select_interaction("name == 'a[2]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = qubo.select_interaction("name == 'a[1]*a[2]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = qubo.select_interaction("name == 'my interaction'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = qubo.select_interaction("name == 'a[1] (before fixed: a[1]*a[2])'")
assert selected["body"].values[0] == 1
assert selected["key"].values[0] == "a[1]"
assert selected["coefficient"].values[0] == 40.0
assert selected["scale"].values[0] == 1.0
assert selected["dirty"].values[0]
assert not selected["removed"].values[0]
selected = qubo.select_interaction("name == 'a[1] (before fixed: my interaction)'")
assert selected["body"].values[0] == 1
assert selected["key"].values[0] == "a[1]"
assert selected["coefficient"].values[0] == 45.0
assert selected["scale"].values[0] == 1.0
assert selected["dirty"].values[0]
assert not selected["removed"].values[0]
qubo.fix_variable(a[4], 0)
selected = qubo.select_interaction("name == 'a[4]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
selected = qubo.select_interaction("name == 'a[3]*a[4]'")
assert selected["dirty"].values[0]
assert selected["removed"].values[0]
    # Fixing a variable with no registered interactions only emits a warning; nothing else changes.
with pytest.warns(UserWarning):
qubo.fix_variable(a[5], 1)
def test_logical_model_fix_invalid_argument(ising):
with pytest.raises(TypeError):
ising.fix_variable()
with pytest.raises(TypeError):
ising.fix_variable("invalid type")
with pytest.raises(ValueError):
ising.fix_variable(target=None, value=1)
|
StarcoderdataPython
|
394392
|
<filename>aircraftdata.py<gh_stars>10-100
# Using the api.joshdouch.me calls to get data from ICAO hex
import requests
def regis(hex):
    """
    Gets registration from hex
    """
    if hex is None:
        return None
    regis = requests.get(f"https://api.joshdouch.me/hex-reg.php?hex={hex}")
    return regis.text

def plane(hex):
    """
    Gets aircraft type from hex
    """
    if hex is None:
        return None
    plane = requests.get(f"https://api.joshdouch.me/hex-type.php?hex={hex}")
    return plane.text

def oper(hex):
    """
    Gets operating airline from hex
    """
    if hex is None:
        return None
    oper = requests.get(f"https://api.joshdouch.me/hex-airline.php?hex={hex}")
    return oper.text
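
# Minimal usage sketch (illustrative only): the ICAO hex below is a made-up
# placeholder, and the api.joshdouch.me endpoints are assumed to answer with
# plain text, as the functions above already rely on.
if __name__ == "__main__":
    icao_hex = "4CA853"  # hypothetical 24-bit ICAO address
    print(regis(icao_hex))  # registration string
    print(plane(icao_hex))  # aircraft type
    print(oper(icao_hex))   # operating airline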
|
StarcoderdataPython
|
132132
|
import telebot
from telebot import types
chave = "<KEY>"
bot = telebot.TeleBot(chave)
@bot.message_handler(commands=["badalo"])
def responder_badalo(mensagem):
bot.reply_to(mensagem,"badalado em você")
@bot.message_handler(commands=["redes"])
def responder_redes(mensagem):
bot.reply_to(mensagem,"Jownao se encontra atualmente nessas redes sociais:\n"+
"Instagram: instagram.com/jownao\n"+
"Twitch: twitch.tv/jownao\n"+
"Github: github.com/Jownao")
@bot.message_handler(commands=["amigos"])
def responder_amigos(mensagem):
bot.reply_to(mensagem,"Amigos:\n"+
"Carlo\nRulio\nVicsk\nAlbert\nzaboyz ltda")
@bot.message_handler(func=lambda m: True)
def echo_all(message):
    # compare the message text, not the Message object itself
    if message.text.lower() == "oi":
        bot.reply_to(message, "Iae de boa ?")
    else:
        bot.reply_to(message, message.text)
chat_id = "798429701"
# or add KeyboardButton one row at a time:
markup = types.ReplyKeyboardMarkup()
itembtna = types.KeyboardButton('Badalo')
itembtnv = types.KeyboardButton('de')
itembtnc = types.KeyboardButton('Sino')
itembtnd = types.KeyboardButton('Macho')
itembtne = types.KeyboardButton('Veio')
markup.row(itembtna, itembtnv)
markup.row(itembtnc, itembtnd, itembtne)
bot.send_message(chat_id, "Escolha oque desejar meu caro:", reply_markup=markup)
bot.polling()
|
StarcoderdataPython
|
1841599
|
<filename>weeconsume.py
# consume notifications from redis
import sys
import json
import subprocess
import redis
import time
def growl(message):
# args = ['growlnotify', '-m', message]
args = ['notify-send', 'IRC', message]
subprocess.check_call(args)
def highlight_decision(event):
if event['hilight']:
return True
elif 'notify_private' in event['tags']:
return True
elif event['buffer_name'] == 'bitlbee.#naaya':
return True
elif event['buffer_is_front']:
return True
else:
return False
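
# Quick illustration (hypothetical event dict, not real weechat output):
# highlight_decision({'hilight': False, 'tags': ['notify_private'],
#                     'buffer_name': 'freenode.#python',
#                     'buffer_is_front': False}) -> True
# because the 'notify_private' tag alone is enough to trigger a notification.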
def consume_json(e):
event = json.loads(e)
# print event
if highlight_decision(event):
growl("%(prefix)s | %(msg)s" % event)
def main():
redis_port = int(sys.argv[1])
server = redis.Redis(port=redis_port, db=13)
    while True:
        msg = server.lpop('weechat')
        if msg is not None:
            consume_json(msg)
        time.sleep(5)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9751896
|
<filename>Competitive Programming/Binary Search Trees/Given n appointments, find all conflicting appointments.py
'''https://www.geeksforgeeks.org/given-n-appointments-find-conflicting-appointments/
Given n appointments, find all conflicting appointments.
Examples:
Input: appointments[] = {{1, 5} {3, 7}, {2, 6}, {10, 15}, {5, 6}, {4, 100}}
Output: Following are conflicting intervals
[3, 7] Conflicts with [1, 5]
[2, 6] Conflicts with [1, 5]
[5, 6] Conflicts with [3, 7]
[4, 100] Conflicts with [1, 5]
An appointment is conflicting if it conflicts with any of the previous appointments in the array.
We strongly recommend to minimize the browser and try this yourself first.
A Simple Solution is to process the appointments one by one, from the second appointment to the last. For every appointment i, check if it conflicts with i-1, i-2, ..., 0. The time complexity of this method is O(n^2).
We can use an Interval Tree to solve this problem in O(n log n) time. Following is a detailed algorithm.
1) Create an Interval Tree, initially with the first appointment.
2) Do following for all other appointments starting from the second one.
a) Check if the current appointment conflicts with any of the existing
appointments in Interval Tree. If conflicts, then print the current
appointment. This step can be done O(Logn) time.
b) Insert the current appointment in Interval Tree. This step also can
be done O(Logn) time.
Following is the implementation of the above idea.
'''
# Python3 program to print all conflicting
# appointments in a given set of appointments
# Structure to represent an interval
class Interval:
def __init__(self):
self.low = None
self.high = None
# Structure to represent a node
# in Interval Search Tree
class ITNode:
def __init__(self):
self.max = None
self.i = None
self.left = None
self.right = None
def newNode(j):
# print(j)
temp = ITNode()
temp.i = j
temp.max = j[1]
return temp
# A utility function to check if
# given two intervals overlap
def doOVerlap(i1, i2):
if (i1[0] < i2[1] and i2[0] < i1[1]):
return True
return False
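
# Quick check against the example in the docstring: [5, 6] and [1, 5] only
# touch at 5, and the strict '<' comparisons treat touching intervals as
# non-conflicting, so [5, 6] is reported only against [3, 7].
# doOVerlap([5, 6], [1, 5]) -> False;  doOVerlap([5, 6], [3, 7]) -> True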
# Function to insert a new interval into the
# Interval Search Tree rooted at 'node'
def insert(node, data):
    # If the tree is empty, return a new node
    root = node
    if node is None:
        return newNode(data)
    # If the interval's low endpoint is smaller than
    # the root's, insert into the left subtree
    if data[0] < node.i[0]:
        root.left = insert(node.left, data)
    # Otherwise insert into the right subtree
    else:
        root.right = insert(node.right, data)
    # Update the max value of this ancestor if needed
    if root.max < data[1]:
        root.max = data[1]
    return root
# The main function that searches for a given
# interval i in a given Interval Tree.
def overlapSearch(root, i):
    # Base Case, tree is empty
    if root is None:
        return None
    # If the given interval overlaps with root
    if doOVerlap(root.i, i):
        return root.i
    # If the left child of root is present and
    # the max of the left child is greater than or
    # equal to the low endpoint of i, then i may
    # overlap with an interval in the left subtree
    if root.left is not None and root.left.max >= i[0]:
        return overlapSearch(root.left, i)
    # Else the interval can only overlap
    # with the right subtree
    return overlapSearch(root.right, i)
# This function prints all conflicting
# appointments in a given array of
# appointments.
def printConflicting(appt, n):
# Create an empty Interval Search Tree,
# add first appointment
root = None
root = insert(root, appt[0])
# Process rest of the intervals
for i in range(1, n):
# If current appointment conflicts
# with any of the existing intervals,
# print it
res = overlapSearch(root, appt[i])
        if res is not None:
print("[", appt[i][0], ",", appt[i][1],
"] Conflicts with [", res[0],
",", res[1], "]")
# Insert this appointment
root = insert(root, appt[i])
# Driver code
if __name__ == '__main__':
    # Appointments from the example in the
    # problem statement above
appt = [[1, 5], [3, 7],
[2, 6], [10, 15],
[5, 6], [4, 100]]
n = len(appt)
print("Following are conflicting intervals")
printConflicting(appt, n)
|
StarcoderdataPython
|
1763738
|
<reponame>hwikene/steam-wishlist
from homeassistant import config_entries, core
from .const import DOMAIN
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Defer sensor setup to the sensor manager."""
await hass.data[DOMAIN][config_entry.entry_id].async_register_component(
"binary_sensor", async_add_entities
)
|
StarcoderdataPython
|
4816273
|
<reponame>banteg/woofy-snapshot
from collections import Counter
from fractions import Fraction

from brownie import Contract
from brownie.convert import EthAddress
from eth_abi.exceptions import InsufficientDataBytes

from camera_shy.common import (
    NFT_POSITION_MANAGER,
    UNISWAP_V3_FACTORY,
    eth_call,
    get_code,
    memory,
)
from camera_shy.multicall import fetch_multicall, fetch_multicall_batched
@memory.cache()
def is_uniswap_v3_pool(address):
if not get_code(address):
return False
try:
return EthAddress(eth_call(address, "factory()(address)")) == UNISWAP_V3_FACTORY
except (ValueError, InsufficientDataBytes):
return False
@memory.cache()
def fetch_uniswap_v3_positions(block):
manager = Contract(NFT_POSITION_MANAGER)
total_supply = manager.totalSupply(block_identifier=block)
ids = fetch_multicall_batched(
[[manager, "tokenByIndex", i] for i in range(total_supply)], block=block
)
positions = fetch_multicall_batched(
[[manager, "positions", i] for i in ids], block=block
)
return {token_id: position.dict() for token_id, position in zip(ids, positions)}
def filter_positions_of_pool(pool, positions):
token0, token1, fee = fetch_multicall(
[[pool, key] for key in ["token0", "token1", "fee"]]
)
return {
i: pos
for i, pos in positions.items()
if (pos["token0"], pos["token1"], pos["fee"]) == (token0, token1, fee)
}
def unwrap_liquidity(pool, token, positions, block=None, min_balance=0):
manager = Contract(NFT_POSITION_MANAGER)
positions = filter_positions_of_pool(pool, positions)
total_liquidity = sum(pos["liquidity"] for pos in positions.values())
total_balance = token.balanceOf(pool, block_identifier=block)
owners = fetch_multicall([[manager, "ownerOf", i] for i in positions], block=block)
user_balances = Counter()
for i, owner in zip(positions, owners):
user_balances[owner] += (
Fraction(positions[i]["liquidity"], total_liquidity) * total_balance
)
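        # e.g. (illustrative numbers, not real pool data): a position holding
        # 25 of 100 total liquidity units is credited Fraction(25, 100) *
        # total_balance; Fraction keeps this exact until the int() below.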
return {
user: int(tokens)
for user, tokens in user_balances.most_common()
if tokens >= min_balance
}
|
StarcoderdataPython
|
3367573
|
<reponame>UnixJunkie/selfies<gh_stars>1-10
"""This script is specifically for testing the large eMolecules dataset,
which, upon downloading, should have the file name version.smi.gz.
This test will automatically create a checkpoint file, so that the test
can be paused and resumed easily. Please note that there are some SMILES
in the dataset that are expected to fail; these are SMILES which use
the same ring-bond number across a dot symbol.
Example: C1.C2.C12, COC(=O)C1CCCN1CP123OCCO1.C(CO2)O3
"""
import faulthandler
import os
import pandas as pd
import pytest
from rdkit.Chem import MolFromSmiles, MolToSmiles
import selfies as sf
faulthandler.enable()
# Test Configuration ==========================================================
# file path
curr_dir = os.path.dirname(__file__)
EMOL_PATH = os.path.join(curr_dir, 'test_sets', 'version.smi.gz')
# SMILES in eMolecules are under this column name
COL_NAME = 'isosmiles'
# Tests =======================================================================
# TODO: Comment out the pytest skip to use this script.
@pytest.mark.skip(reason="eMolecules dataset not on GitHub")
def test_roundtrip_translation():
    """Tests a roundtrip SMILES -> SELFIES -> SMILES translation of the
    SMILES examples in the eMolecules dataset.
    """
# modify constraints
constraints = sf.get_semantic_constraints()
constraints['N'] = 6
constraints['Br'] = 7
constraints['Cl'] = 7
constraints['I'] = 7
sf.set_semantic_constraints(constraints)
# file I/O
ckpt_path = os.path.join(curr_dir, 'checkpoints', 'emolecule_ckpt.txt')
error_path = os.path.join(curr_dir, 'error_sets', 'errors_emolecules.csv')
# check if a previous checkpoint exists to continue tests
if os.path.exists(ckpt_path):
with open(ckpt_path, 'r') as ckpt_file:
checkpoint = int(ckpt_file.readlines()[0])
# if no path to a checkpoint exists,
# create a new directory for error logging and checkpoints
else:
os.makedirs(os.path.dirname(ckpt_path), exist_ok=True)
os.makedirs(os.path.dirname(error_path), exist_ok=True)
with open(error_path, "w+") as error_log:
error_log.write("In, Out\n")
checkpoint = -1
error_list = []
error_found_flag = False
# make pandas reader
reader = pd.read_csv(EMOL_PATH,
chunksize=10000,
compression='gzip',
delimiter=' ',
header=0)
# roundtrip testing
for chunk_idx, chunk in enumerate(reader):
if chunk_idx <= checkpoint:
continue
for in_smiles in chunk[COL_NAME]:
# check if SMILES in chunk is a valid RDKit molecule.
# if not, skip testing
# All inputted SMILES must be valid
# RDKit Mol objects to be encoded.
if (MolFromSmiles(in_smiles) is None) or ('*' in in_smiles):
continue
# encode selfies
selfies = sf.encoder(in_smiles)
# if unable to encode SMILES, write to list of errors
if selfies is None:
error_list.append((in_smiles, ''))
continue
            # take the encoded SELFIES and decode it back to SMILES
            out_smiles = sf.decoder(selfies)

            # compare the original SMILES to the decoded SELFIES string.
            # if they are not the same molecule, write to the list of errors.
if not is_same_mol(in_smiles, out_smiles):
error_list.append((in_smiles, out_smiles))
# open and write all errors to errors_emolecule.csv
with open(error_path, "a") as error_log:
for error in error_list:
error_log.write(','.join(error) + "\n")
error_found_flag = error_found_flag or error_list
error_list = []
# create checkpoint from the current pandas reader chunk,
# to load from and continue testing.
with open(ckpt_path, 'w+') as ckpt_file:
ckpt_file.write(str(chunk_idx))
sf.set_semantic_constraints() # restore defaults
os.remove(ckpt_path) # remove checkpoint
assert not error_found_flag
# Helper Methods
def is_same_mol(smiles1, smiles2):
"""Helper method that returns True if smiles1 and smiles2 correspond
to the same molecule.
"""
if smiles1 is None or smiles2 is None:
return False
m1 = MolFromSmiles(smiles1)
m2 = MolFromSmiles(smiles2)
if m1 is None or m2 is None:
return False
can1 = MolToSmiles(m1)
can2 = MolToSmiles(m2)
return can1 == can2
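
# Minimal sketch (illustrative SMILES, not from the dataset): "OCC" and "CCO"
# are two spellings of ethanol and canonicalize to the same string, so
# is_same_mol("OCC", "CCO") returns True.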
|
StarcoderdataPython
|
3537536
|
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
# Tweet retrieval script
#
# Written by <NAME>
# April 2018
# dependencies
import datetime
import json
from newsSourcesENTT import english_sources_twitter
import sys
from twitterApiHandle import api
# English news
def news(screen_names = english_sources_twitter, date = None):
    # resolve the default date at call time rather than at import time
    if date is None:
        date = datetime.date.today().isoformat()
    tweetCount = 0
    print(file = sys.stderr)
for screen_name in screen_names:
for status in api.GetSearch(term = 'from:' + str(screen_name), until = date, count = 100) + api.GetSearch(term = 'from:' + str(screen_name), since = date, count = 100):
tweet = json.loads(str(status))
tweet['entities'] = {'urls': tweet['urls']}
del tweet['urls']
yield tweet
tweetCount += 1
print('\x1b[A\x1b[2K\r' + str(tweetCount), file = sys.stderr)
# live stream
def filter(track = [], follow = [], limit = 3000, englishOnly = True, keepRT = True):
tweetCount = 0
s = None
if len(track) == 0 and len(follow) == 0:
s = api.GetStreamSample()
else:
if englishOnly:
s = api.GetStreamFilter(track = track, follow = follow, languages = ['en'])
else:
s = api.GetStreamFilter(track = track, follow = follow)
print(file = sys.stderr)
for message in s:
# if we are done
if limit > 0 and tweetCount >= limit:
break
# only keep tweets
if 'text' not in message:
continue
# only keep tweets in English
if englishOnly and message['lang'] != 'en':
continue
# filter out retweets
if (not keepRT) and ('retweeted_status' in message):
continue
# kept tweet
yield message
tweetCount += 1
print('\x1b[A\x1b[2K\r' + str(tweetCount), file = sys.stderr)
# read from file
def fromFile(fName, keepRT = True):
with open(fName) as f:
tweetCount = 0
print(file = sys.stderr)
for line in f:
tweet = json.loads(line)
if (not keepRT) and tweet['text'][:2] == 'RT':
continue
yield tweet
tweetCount += 1
print('\x1b[A\x1b[2K\r' + str(tweetCount), file = sys.stderr)
# get tweet IDs from a file
def fromIDsFile(fName, keepRT = True):
with open(fName) as f:
notFound = []
tweetCount = 0
print(file = sys.stderr)
lines = [l for l in f]
        lines100 = [lines[100*i:100*i+100] for i in range((len(lines) + 99) // 100)]
for i in range(len(lines100)):
lines100[i] = [line.split('\t')[0] for line in lines100[i]]
for tIter, tweetIDs in enumerate(lines100):
try:
tweets = [json.loads(str(s)) for s in api.GetStatuses(tweetIDs)]
notFound += [tweetID for tweetID in tweetIDs if tweetID not in [tweet['id_str'] for tweet in tweets]]
for tweet in tweets:
if (not keepRT) and tweet['text'][:2] == 'RT':
continue
yield tweet
tweetCount += 1
except Exception as e:
print(str(e) + '\n', file = sys.stderr)
pass
print('\x1b[A\x1b[2K\r' + str(tIter + 1) + '/' + str(len(lines100)) + ' ' + str(tweetCount) + ' kept', file = sys.stderr)
print('\n'.join([json.dumps({'id_str': nf, 'id': int(nf)}) for nf in notFound]))
|
StarcoderdataPython
|
1409
|
import sklearn.linear_model
from autosklearn.pipeline.components.classification.passive_aggressive import \
PassiveAggressive
from .test_base import BaseClassificationComponentTest
class PassiveAggressiveComponentTest(BaseClassificationComponentTest):
__test__ = True
res = dict()
res["default_iris"] = 0.92
res["iris_n_calls"] = 5
res["default_iris_iterative"] = 0.92
res["iris_iterative_n_iter"] = 32
res["default_iris_proba"] = 0.29271032477461295
res["default_iris_sparse"] = 0.4
res["default_digits"] = 0.9156041287188829
res["digits_n_calls"] = 6
res["default_digits_iterative"] = 0.9156041287188829
res["digits_iterative_n_iter"] = 64
res["default_digits_binary"] = 0.9927140255009107
res["default_digits_multilabel"] = 0.90997912489192
res["default_digits_multilabel_proba"] = 1.0
res['ignore_hps'] = ['max_iter']
sk_mod = sklearn.linear_model.PassiveAggressiveClassifier
module = PassiveAggressive
step_hyperparameter = {
'name': 'max_iter',
'value': module.get_max_iter(),
}
|
StarcoderdataPython
|
375582
|
################################################################################
## Copyright (c) 2019, <NAME> & <NAME>
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice
## this list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## 3. Neither the name of the copyright holder nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
################################################################################
from proto import environment_projections_pb2
import numpy as np
class EnvironmentInterpreter():
    def __init__(self, projections_filename):
        self.environment_projections = \
            environment_projections_pb2.EnvironmentProjections()
        with open(projections_filename, 'rb') as f:
            self.environment_projections.ParseFromString(f.read())
self.num_projections = len(self.environment_projections.projections)
    # searches through a batch of workspaces and returns those containing the
    # corresponding (x, y); returns [-1] if none do
def get_current_workspace(self, x, y, batch):
workspaces = []
for workspace in batch:
if not self.is_outer_workspace_area_cell(x, y, workspace):
workspaces.append(workspace)
if len(workspaces) > 0:
return workspaces
return [-1]
# Builds a numpy array of all the cells in a workspace for rendering
def get_workspace_map(self, workspace):
        projection = self.environment_projections.projections[workspace]
data = list()
for cell in projection.data:
data.append(cell)
data = np.reshape(np.asarray(data), (self.environment_projections.rows, -1))
return data
# Maps an (x, y, workspace) tuple to the surface(s) it belongs to
# if the (x, y, workspace) is a gate then there are two surfaces
def get_workspace_indices(self, x, y, workspace):
projection = self.environment_projections.projections[workspace]
cols = self.environment_projections.cols
rows = self.environment_projections.rows
workspace_2d_idx = y*cols + x
surface_z = projection.workspace_2d_to_surface_3d[workspace_2d_idx]
        surface_3d_idx = (surface_z * rows * cols) + (y * cols) + x
return self.environment_projections.surface_3d_to_indices[surface_3d_idx]
# returns the type of cell at (x, y, workspace)
def get_projected_cell(self, x, y, workspace):
projection = self.environment_projections.projections[workspace]
idx = int(y)*self.environment_projections.cols + int(x)
return projection.data[idx]
def get_projection_size(self):
return self.environment_projections.rows, self.environment_projections.cols
def is_outer_workspace_area_cell(self, x, y, workspace):
cell = self.get_projected_cell(x, y, workspace)
return cell == environment_projections_pb2.EnvironmentProjection.OUTER_WORKSPACE_AREA_CELL
def is_gate_cell(self, x, y, workspace):
cell = self.get_projected_cell(x, y, workspace)
return cell == environment_projections_pb2.EnvironmentProjection.GATE_CELL
def is_free_cell(self, x, y, workspace):
cell = self.get_projected_cell(x, y, workspace)
return cell == environment_projections_pb2.EnvironmentProjection.FREE_CELL
|
StarcoderdataPython
|
1934949
|
<filename>snapshottest/parse_env.py<gh_stars>0
import os
def _env_bool(val):
return val.lower() in ["1", "yes", "true", "t", "y"]
def env_snapshot_update():
return _env_bool(os.environ.get("SNAPSHOT_UPDATE", "false"))
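
# Minimal usage sketch (hypothetical values): any of "1", "yes", "true", "t"
# or "y" (case-insensitive) in SNAPSHOT_UPDATE makes env_snapshot_update()
# return True; unset or anything else gives False.
if __name__ == "__main__":
    os.environ["SNAPSHOT_UPDATE"] = "yes"  # illustrative value
    print(env_snapshot_update())  # -> True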
|
StarcoderdataPython
|
140513
|
# Generated by Django 3.2.3 on 2021-05-17 02:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Floor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Hotel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='SubCorridor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('floor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hotel.floor')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MainCorridor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('floor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hotel.floor')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Light',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('consumption_unit', models.IntegerField(default=5)),
('turned_on', models.BooleanField(default=True)),
('main_corridor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hotel.maincorridor')),
('sub_corridor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hotel.subcorridor')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='floor',
name='hotel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hotel.hotel'),
),
migrations.CreateModel(
name='AirConditioner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('consumption_unit', models.IntegerField(default=5)),
('turned_on', models.BooleanField(default=True)),
('main_corridor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hotel.maincorridor')),
('sub_corridor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hotel.subcorridor')),
],
options={
'abstract': False,
},
),
]
|
StarcoderdataPython
|
1984606
|
from backend.common_tile import CommonTile
import math
class Farm(CommonTile):
def __init__(self):
super().__init__()
self.food = 1
self.production = 0
self.gold = 0
self.housing = .5
self.acceptable_terrain = [
'grassland',
'plains',
]
self.acceptable_features = [
'floodplains',
'volcanic_soil'
]
self.resources = [
'wheat',
'rice',
'maize',
]
self.hills = True
def calculate_adjacency(self, tile_obj, target_index, adj_list): # pragma: no cover
target_object = getattr(tile_obj, target_index)
adj_count = 0
for adj_obj in adj_list:
if adj_obj is None:
continue
if isinstance(adj_obj.improvement, Farm):
adj_count += 1
if tile_obj.erah >= 2:
target_object.food = target_object.food + math.floor(adj_count / 2)
if tile_obj.erah >= 5:
target_object.food = target_object.food + adj_count
|
StarcoderdataPython
|
5195261
|
"""
Objective
In this challenge, we will use loops to do some math. Check out the Tutorial tab to learn more.
Task
Given an integer, n, print its first 10 multiples. Each multiple n x i (where 1 <= i <= 10) should be printed on a new line in the form: n x i = result.
Example
The printout should look like this:
3 x 1 = 3
3 x 2 = 6
3 x 3 = 9
3 x 4 = 12
3 x 5 = 15
3 x 6 = 18
3 x 7 = 21
3 x 8 = 24
3 x 9 = 27
3 x 10 = 30
Input Format
A single integer, n.
Constraints
Output Format
Print 10 lines of output; each line i (where 1 <= i <= 10) contains the result of n x i in the form:
n x i = result.
Sample Input
2
Sample Output
2 x 1 = 2
2 x 2 = 4
2 x 3 = 6
2 x 4 = 8
2 x 5 = 10
2 x 6 = 12
2 x 7 = 14
2 x 8 = 16
2 x 9 = 18
2 x 10 = 20
"""
#!/bin/python3
import math
import os
import random
import re
import sys
n = int(input())

if 1 <= n <= 20:
    for i in range(1, 11):
        print(n, "x", i, "=", n * i)
|
StarcoderdataPython
|
1607835
|
<gh_stars>0
from dataclasses import dataclass
from bindings.csw.derived_crstype_type import DerivedCrstypeType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class DerivedCrstype(DerivedCrstypeType):
class Meta:
name = "derivedCRSType"
namespace = "http://www.opengis.net/gml"
|
StarcoderdataPython
|