hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f8082f1e3f5f385cac811686714cd680277f4584 | 7,406 | py | Python | repro_eval/__main__.py | irgroup/repro_eval | 35a4cf083dbb5f4b29d6ef602a604f0686a537c9 | ["MIT"] | 8 | 2020-10-27T02:11:53.000Z | 2022-03-02T11:00:10.000Z | repro_eval/__main__.py | irgroup/repro_eval | 35a4cf083dbb5f4b29d6ef602a604f0686a537c9 | ["MIT"] | 2 | 2021-01-25T19:59:39.000Z | 2021-12-07T09:29:01.000Z | repro_eval/__main__.py | irgroup/repro_eval | 35a4cf083dbb5f4b29d6ef602a604f0686a537c9 | ["MIT"] | 1 | 2021-04-16T16:21:16.000Z | 2021-04-16T16:21:16.000Z |
"""
Use repro_eval from the command line with e.g.
python -m repro_eval -t rpd -q qrel_orig -r orig_b rpd_b
python -m repro_eval -t rpd -q qrel_orig -r orig_b orig_a rpd_b rpd_a
python -m repro_eval -t rpd -m rmse -q qrel_orig -r orig_b rpd_b
python -m repro_eval -t rpl -q qrel_orig qrel_rpl -r orig_b rpl_b
python -m repro_eval -t rpl -q qrel_orig qrel_rpl -r orig_b orig_a rpl_b rpl_a
after having installed the Python package.
For other more specific examples also have a look at the README file.
Depending on the provided parameters and input run files,
evaluation measures will be printed.
"""
import argparse
from repro_eval.Evaluator import RpdEvaluator, RplEvaluator
from repro_eval.util import print_simple_line, print_base_adv
from repro_eval.util import arp
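# The body of main() is not shown in this cell. A minimal sketch of the argparse wiring
# implied by the usage examples in the docstring above (option names and defaults are
# assumptions, not the package's actual implementation):
def main():
    parser = argparse.ArgumentParser(prog='repro_eval')
    parser.add_argument('-t', '--type', choices=['rpd', 'rpl'], required=True)
    parser.add_argument('-m', '--measure')
    parser.add_argument('-q', '--qrels', nargs='+', required=True)
    parser.add_argument('-r', '--runs', nargs='+', required=True)
    args = parser.parse_args()
    # an RpdEvaluator or RplEvaluator would be built from args here and its measures
    # printed with print_simple_line / print_base_adv (and arp for average retrieval
    # performance), depending on the provided run files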
if __name__ == "__main__":
main()
| 43.309942 | 109 | 0.498785 |
f809139d6c632c257d27b2da4aee81ff3ca5dcc2 | 2,377 | py | Python | main.py | juligreen/towerdefense-prototype | 1cdac58acf697ca856a60dec6533caed17acf656 | ["MIT"] | null | null | null | main.py | juligreen/towerdefense-prototype | 1cdac58acf697ca856a60dec6533caed17acf656 | ["MIT"] | null | null | null | main.py | juligreen/towerdefense-prototype | 1cdac58acf697ca856a60dec6533caed17acf656 | ["MIT"] | null | null | null |
import math
from game_objects import Turret, Troop
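# Location is referenced in the type hints below but never imported in the shown content.
# A minimal stand-in (an assumption for illustration, not part of the original project):
from dataclasses import dataclass

@dataclass
class Location:
    x: float
    y: float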
players = []
def calculate_distance(entity1: Location, entity2: Location) -> float:
# distance between vectors: https://brilliant.org/wiki/distance-formula/
    distance = math.sqrt((entity1.x - entity2.x) ** 2 + (entity1.y - entity2.y) ** 2)  # both axes use a difference
return distance
init()
while True:
# most of this is pseudocode, as I have no way of handling user input currently
for index, player in enumerate(players):
if 'player places turret':
player.add_turret(Location(1, 1))
if 'player places troops':
player.add_troops('bla')
for troop in player.troops:
troop.move()
player.turret_fire_check()
if player.health <= 0:
print(f'Player {index} won the game!')
| 30.088608 | 94 | 0.636096 |
f8094b25e0893a5bce69fe2d108d090003595a0e | 7,110 | py | Python | bib_processing.py | GAIGResearch/GAIGResearch.github.io | 90d0555348ad8f3f500b6480168ad65fa0226dce | ["MIT"] | null | null | null | bib_processing.py | GAIGResearch/GAIGResearch.github.io | 90d0555348ad8f3f500b6480168ad65fa0226dce | ["MIT"] | null | null | null | bib_processing.py | GAIGResearch/GAIGResearch.github.io | 90d0555348ad8f3f500b6480168ad65fa0226dce | ["MIT"] | 2 | 2019-07-09T11:08:15.000Z | 2020-12-04T14:55:00.000Z |
import os
from pathlib import Path
from difflib import SequenceMatcher
supported_bibtex_types = {"article", "book", "booklet", "inbook", "incollection", "inproceedings", "manual",
"mastersthesis", "misc", "phdthesis", "proceedings", "techreport", "unpublished"}
supported_fields = ["author", "title", "year", "month", "pages", "note",
"journal", "booktitle",
"volume", "number", "series", "edition",
"editor", "publisher", "address",
"howpublished", "type",
"chapter",
"organization", "school", "institution"]
extra_fields = ["doi", "issn", "isbn", "keywords", "abstract", "url", "archivePrefix", "eprint", "timestamp", "biburl",
"bibsource"]
data_path = Path("_data/papers.yml")
bib_path = Path("bibfiles")
year_from = 2017
similarity_threshold = 0.8
def find_all_files(path_to_search):
"""Recursively find all bib files in root path given"""
list_of_files = os.listdir(path_to_search)
all_files = []
# Iterate over all the entries
for e in list_of_files:
# Create full path
full_path = path_to_search / e
# If entry is a directory then get the list of files in this directory
if os.path.isdir(full_path):
all_files = all_files + find_all_files(full_path)
        elif full_path.suffix == ".bib":  # with_suffix() returns a (truthy) Path, so compare the suffix instead
all_files.append(full_path)
return all_files
def process_entry(entry_to_process):
"""
Turns a string of an entry into a dictionary mapping from fields to field values
:param entry_to_process
:return: dictionary.
"""
dict_entry = {}
entry_lines = entry_to_process.split("\n")
first_line = entry_lines[0].split("=")
entry_type = first_line[0].replace("@", "")
entry_id = first_line[1]
# Type validation
if entry_type.lower() not in supported_bibtex_types:
print("Type " + entry_type + " not supported for bibtex entry " + entry_id)
return dict_entry
dict_entry["id"] = entry_id
dict_entry["type"] = entry_type
# Process the rest of the fields
field_value = "" # Keep this up here to be able to access previous values in case of multi-line field
field = ""
for l in entry_lines:
split_line = l.split("=")
if len(split_line) == 1 and field != "": # No = found on this line, it's a multi-line field
field_value += " " + split_line[0].strip()
dict_entry[field] = field_value.strip()
else:
field = split_line[0].strip()
field_value = split_line[1].strip()
if field.lower() in supported_fields or field.lower() in extra_fields:
if field.lower() == "pages" and "--" not in field_value:
field_value = field_value.replace("-", "--")
dict_entry[field] = field_value
# Try to find pdf of this paper
pdf = find_pdf(entry_id, dict_entry["year"])
dict_entry["pdf"] = str(pdf).lower()
return dict_entry
def find_pdf(entry_id, year):
"""
Returns true if a pdf for this paper exists in the pdf/pub/year directory (must have name as paper ID)
"""
return os.path.isfile("pdf/pub/" + year + "/" + entry_id + ".pdf")
def output_entries(entries):
"""
Prints the given bibtex entries into yaml supported format
"""
with open(data_path.absolute(), 'w+', encoding='utf-8') as wf:
for entry in entries:
if int(entry["year"]) < year_from:
continue
wf.write("- id: " + entry["id"] + "\n")
for e in entry:
if e != "id":
if ":" in entry[e]:
entry[e] = '"' + entry[e] + '"'
wf.write(" " + e + ": " + entry[e] + "\n")
def check_equality(entry1, entry2):
"""
Checks if 2 entries are the same
"""
sim_fields = 0
common_fields = 0
for field1 in entry1:
for field2 in entry2:
if field1 == field2:
common_fields += 1
if similar(entry1[field1], entry2[field2]) >= similarity_threshold:
sim_fields += 1
if common_fields == 0:
return False
if sim_fields / common_fields >= similarity_threshold:
return True
return False
def similar(a, b):
"""
Checks if 2 strings are similar, returns a similarity measure.
"""
return SequenceMatcher(None, a, b).ratio()
def process_yml_entries(lines):
"""
Processes entries in yml format
:param lines: list of lines from yml file to process
:return: list of entries as dictionaries
"""
entry_list = []
entry = {}
ln = 0
for line in lines:
if "- id:" in line or ln == len(lines) - 1: # Starting a new entry
if len(entry) > 0:
entry_list.append(entry)
entry = {}
line = line.replace("\"", "")
if "- id:" in line:
line = line[1:] # Ignore first dash
stripped_line = line.strip()
if stripped_line != "": # Adding to current entry
split_line = stripped_line.split(':')
entry[split_line[0].strip()] = ':'.join(split_line[1:]).strip()
ln += 1
return entry_list
def main():
"""
Main function to process bibtex entries in a given path and output a file in yaml supported format.
"""
# Read in current entries
lines = data_path.read_text(encoding='utf-8').split('\n')
entries = process_yml_entries(lines)
# Find new entries
files = find_all_files(bib_path)
for bibfile in files:
entry = ""
full_pth = Path(bibfile)
lines = full_pth.read_text(encoding='utf-8').split('\n')
line_number = 0
for line in lines:
if "@" in line or line_number == len(lines)-1: # Starting a new entry
if entry != "":
entry = entry.translate({ord(c): None for c in '\\"{}~\'"'})
processed_entry = process_entry(entry)
entries.append(processed_entry)
entry = ""
if "@" in line:
line = line.replace("{", "=")
stripped_line = line.strip()
if stripped_line != "": # Adding to current entry
if stripped_line.endswith(","):
stripped_line = stripped_line[:-1]
entry += stripped_line + "\n"
line_number += 1
# Check for duplication
duplicate_entries = []
for i in range(len(entries)-1):
for j in range(i+1, len(entries)):
if check_equality(entries[i], entries[j]):
print("Duplicate found: " + entries[i]["id"] + " = " + entries[j]["id"])
duplicate_entries.append(j)
duplicate_entries.sort()
for i in range(len(duplicate_entries)):
e = duplicate_entries[i] - i
del entries[e]
# Finally, save entries
output_entries(entries)
if __name__ == "__main__":
main()
| 33.696682 | 119 | 0.568636 |
f80a066211d5845a2d19529db9ed13271bcad6dc | 2,105 | py | Python | browser.py | 7Cortez7/instagram-giveaway-bot | 43246e3ded06ea3a6cbf2ef20164b229fe90ee0e | ["MIT"] | null | null | null | browser.py | 7Cortez7/instagram-giveaway-bot | 43246e3ded06ea3a6cbf2ef20164b229fe90ee0e | ["MIT"] | null | null | null | browser.py | 7Cortez7/instagram-giveaway-bot | 43246e3ded06ea3a6cbf2ef20164b229fe90ee0e | ["MIT"] | null | null | null |
from selenium import webdriver
import time
import userdata as udata
import random
randomUsers = set()
| 31.893939 | 121 | 0.59715 |
f80b2ee49671a1d6b544de429dd777345fa6df27 | 246 | py | Python | HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py | accidentalgenius09/competitive-programming-solution | 210746a7928dcd601ad9a735de52cf7135851070 | ["MIT"] | 8 | 2020-08-03T01:53:13.000Z | 2022-01-09T14:47:58.000Z | HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py | accidentalgenius09/competitive-programming-solution | 210746a7928dcd601ad9a735de52cf7135851070 | ["MIT"] | null | null | null | HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py | accidentalgenius09/competitive-programming-solution | 210746a7928dcd601ad9a735de52cf7135851070 | ["MIT"] | 4 | 2020-09-29T11:28:53.000Z | 2021-06-02T15:34:55.000Z |
'''
Title : Linear Algebra
Subdomain : Numpy
Domain : Python
Author : codeperfectplus
Created : 10 May 2020
'''
import numpy
n=int(input())
a=numpy.array([input().split() for _ in range(n)],float)
print(round(numpy.linalg.det(a),2))
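# For example, the input
#   2
#   1.1 1.1
#   1.1 1.2
# gives det = 1.1*1.2 - 1.1*1.1 = 0.11, so the program prints 0.11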
| 18.923077 | 56 | 0.670732 |
f80c608952146d7fe3d7ed75d5f4bc0dc27ba8ce | 774 | py | Python | pyretri/index/dim_processor/dim_processors_impl/l2_normalize.py | dongan-beta/PyRetri | 8756d5d5813a5211b58855373b6c6cd33d7a11f6 | ["Apache-2.0"] | 1,063 | 2020-04-21T12:42:05.000Z | 2022-03-31T06:32:50.000Z | pyretri/index/dim_processor/dim_processors_impl/l2_normalize.py | dongan-beta/PyRetri | 8756d5d5813a5211b58855373b6c6cd33d7a11f6 | ["Apache-2.0"] | 39 | 2020-05-07T07:24:19.000Z | 2022-02-02T23:49:23.000Z | pyretri/index/dim_processor/dim_processors_impl/l2_normalize.py | dongan-beta/PyRetri | 8756d5d5813a5211b58855373b6c6cd33d7a11f6 | ["Apache-2.0"] | 174 | 2020-04-26T04:33:11.000Z | 2022-03-17T02:58:45.000Z |
# -*- coding: utf-8 -*-
import numpy as np
from ..dim_processors_base import DimProcessorBase
from ...registry import DIMPROCESSORS
from sklearn.preprocessing import normalize
from typing import Dict, List
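# The processor class itself is not shown in this cell. A minimal sketch of an L2
# normalisation dim processor (class name and registration decorator are assumptions):
@DIMPROCESSORS.register
class L2Normalize(DimProcessorBase):
    default_hyper_params = dict()

    def __call__(self, fea: np.ndarray) -> np.ndarray:
        # scale every feature vector to unit L2 norm
        return normalize(fea, norm="l2")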
| 27.642857 | 75 | 0.666667 |
f80ccbd3e3b59f33892aafb3cc6b1f95f360dd40 | 1,631 | py | Python | test_csv_write.py | wandyrandy/Groupme-Group-Stats-Report | 25a59b715a7555540695639de81db390f09eb122 | ["MIT"] | 2 | 2019-08-13T21:50:32.000Z | 2019-08-14T00:49:29.000Z | test_csv_write.py | wandyrandy/Groupme-Group-Stats-Report | 25a59b715a7555540695639de81db390f09eb122 | ["MIT"] | null | null | null | test_csv_write.py | wandyrandy/Groupme-Group-Stats-Report | 25a59b715a7555540695639de81db390f09eb122 | ["MIT"] | null | null | null |
import csv
import person
from random import randrange
headers = ['Name', 'Messages', 'Char Count', 'Likes Given', 'Likes Received', 'Image URL']
#tester code
people = ['bob', 'joe', 'gmo']
bob = person.Person(111, 'bob', 'www.bob.com', people)
joe = person.Person(222, 'joe', 'www.joe.com', people)
gmo = person.Person(333, 'gmo', 'www.gmo.com', people)
members = [bob, joe, gmo]
bob.msgs = randrange(40)
bob.likes_given = randrange(40)
bob.likes_received = randrange(40)
bob.chars = randrange(40)
bob.friends['gmo'] = randrange(40)
bob.friends['joe'] = randrange(40)
bob.friends['bob'] = randrange(40)
joe.msgs = randrange(40)
joe.likes_given = randrange(40)
joe.likes_received = randrange(40)
joe.chars = randrange(40)
joe.friends['gmo'] = randrange(40)
joe.friends['joe'] = randrange(40)
joe.friends['bob'] = randrange(40)
gmo.msgs = randrange(40)
gmo.likes_given = randrange(40)
gmo.likes_received = randrange(40)
gmo.chars = randrange(40)
gmo.friends['gmo'] = randrange(40)
gmo.friends['joe'] = randrange(40)
gmo.friends['bob'] = randrange(40)
# loop through the list of members and add their names to the headers
for member in members:
headers.append(member.name)
with open('raw_groupme_data.csv', 'w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(headers)
for member in members:
row = [member.name, member.msgs, member.chars, member.likes_given,
member.likes_received, member.image_url]
for friend in member.friends:
row.append(member.friends[friend])
csv_writer.writerow(row)
| 31.980392 | 91 | 0.676272 |
f80e19316ce840fcc2138b746a64f522d8f4566b | 866 | py | Python | app/wqFull/G200/testAll.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | null | null | null | app/wqFull/G200/testAll.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | null | null | null | app/wqFull/G200/testAll.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | 2 | 2021-04-04T02:45:59.000Z | 2022-03-19T09:41:39.000Z |
import pandas as pd
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
import importlib
from hydroDL.master import basinFull
from hydroDL.app.waterQuality import WRTDS
# dataName = 'G200N'
# labelLst = ['QFPRT2C', 'FPRT2C', 'FPRT2QC', 'QFPT2C', 'QFRT2C']
dataName = 'G200'
labelLst = ['QFPRT2C']
trainLst = ['rmR20', 'rmL20', 'rmRT20', 'rmYr5', 'B10']
testLst = ['pkR20', 'pkL20', 'pkRT20', 'pkYr5', 'A10']
DF = dbBasin.DataFrameBasin(dataName)
for label in labelLst:
for trainSet, testSet in zip(trainLst, testLst):
outName = '{}-{}-{}'.format(dataName, label, trainSet)
print(outName)
yP, ycP = basinFull.testModel(outName, DF=DF, testSet=testSet, ep=500)
| 29.862069 | 78 | 0.706697 |
f80f8be872541cb1fed210e79dd3fff53a87f8a4 | 9,733 | py | Python | tests/test_Dirichlet_NL_Poisson.py | bond-anton/BDPoisson1D | 538cedc187ce83e90f340cc085738671d325d2e1 | ["Apache-2.0"] | null | null | null | tests/test_Dirichlet_NL_Poisson.py | bond-anton/BDPoisson1D | 538cedc187ce83e90f340cc085738671d325d2e1 | ["Apache-2.0"] | 2 | 2017-07-21T22:10:19.000Z | 2018-07-14T21:39:07.000Z | tests/test_Dirichlet_NL_Poisson.py | bond-anton/BDPoisson1D | 538cedc187ce83e90f340cc085738671d325d2e1 | ["Apache-2.0"] | null | null | null |
import math as m
import numpy as np
from BDMesh import Mesh1DUniform
from BDFunction1D import Function
from BDFunction1D.Functional import Functional
from BDFunction1D.Interpolation import InterpolateFunction
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_arrays
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_mesh_arrays
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_mesh
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_recurrent_mesh
from BDPoisson1D.DirichletNonLinear import dirichlet_non_linear_poisson_solver_amr
import unittest
| 44.646789 | 111 | 0.552348 |
f810064772dd89a3265f0776de267483682a707d | 23,282 | py | Python | trtools/dumpSTR/tests/test_dumpSTR.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | ["MIT"] | 14 | 2020-04-20T15:38:52.000Z | 2022-02-07T11:45:23.000Z | trtools/dumpSTR/tests/test_dumpSTR.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | ["MIT"] | 74 | 2020-03-02T23:34:53.000Z | 2022-03-21T18:32:10.000Z | trtools/dumpSTR/tests/test_dumpSTR.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | ["MIT"] | 15 | 2018-10-29T19:41:33.000Z | 2020-02-21T18:41:51.000Z |
import argparse
import gzip
import os
import pytest
from ..dumpSTR import *
from trtools.testsupport.utils import assert_same_vcf, assert_same_file
# Set up base argparser
# Test no such file or directory
def test_WrongFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_non_existent.vcf")
if os.path.exists(fname):
os.remove(fname)
args.vcf = fname
retcode = main(args)
assert retcode==1
# Test a file that already has Filter IDs defined
# that we want to use that are of either the wrong number or type.
# Since cyvcf2 currently won't allow us to overwrite them,
# error out
# Test a file that already has a HWE Filter ID defined
# if the field is of the correct type and number, as in this case
# we overwrite it and emit a warning instead of failing
# this allows dumpSTR to be run multiple times in succession
# on the same file
# Test if basic inputs and threshold filters work for each file
# confirm that producing zipped output doesn't crash
# Test invalid options
# Test locus-level filters
"""
def test_InvalidEHOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_ExpansionHunter.vcf")
args.vcf = fname
args.num_records = 10
# TODO add once EH is implemented
"""
"""
These tests run dumpSTR and compare its output
to output that has been generated by a pervious version of
dumpSTR and saved in the repo. The results are expected
to be identical.
These tests are too strict and will often break because
dumpSTR output has been intentionally changed
However, the presence of these tests is important because
it should prevent any unexpected changes in output.
If you've reviewed the change in output and find it acceptable,
use trtools/testsupport/sample_vcfs/dumpSTR_vcfs/create_test_files.sh
to regenerate the tests files with the new output.
"""
# make sure locus level filters produce the same output when
# --drop-filtered is set
# test advntr call level filters
# test hipstr call and locus level filters
# test gangstr call level filters that don't begin
# with 'expansion' - those are tested on another file
# test gangstr call level filters that begin with
# 'expansion' - the other gangstr call level filters
# are tested on another file
# test popstr call level filters
| 34.038012 | 129 | 0.683489 |
f81030a9747b6fbce3be0c3890586bc3da2d99c2 | 27,895 | py | Python | nova/network/ldapdns.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | null | null | null | nova/network/ldapdns.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | null | null | null | nova/network/ldapdns.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z |
begin_unit
comment|'# Copyright 2012 Andrew Bogott for the Wikimedia Foundation'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'import'
name|'ldap'
newline|'\n'
dedent|''
name|'except'
name|'ImportError'
op|':'
newline|'\n'
comment|'# This module needs to be importable despite ldap not being a requirement'
nl|'\n'
DECL|variable|ldap
indent|' '
name|'ldap'
op|'='
name|'None'
newline|'\n'
nl|'\n'
dedent|''
name|'import'
name|'time'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
nl|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_'
op|','
name|'_LW'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'network'
name|'import'
name|'dns_driver'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'utils'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
comment|'# Importing ldap.modlist breaks the tests for some reason,'
nl|'\n'
comment|'# so this is an abbreviated version of a function from'
nl|'\n'
comment|'# there.'
nl|'\n'
DECL|function|create_modlist
name|'def'
name|'create_modlist'
op|'('
name|'newattrs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'modlist'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'attrtype'
name|'in'
name|'newattrs'
op|'.'
name|'keys'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'utf8_vals'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'val'
name|'in'
name|'newattrs'
op|'['
name|'attrtype'
op|']'
op|':'
newline|'\n'
indent|' '
name|'utf8_vals'
op|'.'
name|'append'
op|'('
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'val'
op|')'
op|')'
newline|'\n'
dedent|''
name|'newattrs'
op|'['
name|'attrtype'
op|']'
op|'='
name|'utf8_vals'
newline|'\n'
name|'modlist'
op|'.'
name|'append'
op|'('
op|'('
name|'attrtype'
op|','
name|'newattrs'
op|'['
name|'attrtype'
op|']'
op|')'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'modlist'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|DNSEntry
dedent|''
name|'class'
name|'DNSEntry'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'ldap_object'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""ldap_object is an instance of ldap.LDAPObject.\n\n It should already be initialized and bound before\n getting passed in here.\n """'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'='
name|'ldap_object'
newline|'\n'
name|'self'
op|'.'
name|'ldap_tuple'
op|'='
name|'None'
newline|'\n'
name|'self'
op|'.'
name|'qualified_domain'
op|'='
name|'None'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|_get_tuple_for_domain
name|'def'
name|'_get_tuple_for_domain'
op|'('
name|'cls'
op|','
name|'lobj'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entry'
op|'='
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'CONF'
op|'.'
name|'ldap_dns_base_dn'
op|','
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
nl|'\n'
string|"'(associatedDomain=%s)'"
op|'%'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'domain'
op|')'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'None'
newline|'\n'
dedent|''
name|'if'
name|'len'
op|'('
name|'entry'
op|')'
op|'>'
number|'1'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"Found multiple matches for domain "'
nl|'\n'
string|'"%(domain)s.\\n%(entry)s"'
op|')'
op|','
nl|'\n'
name|'domain'
op|','
name|'entry'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'entry'
op|'['
number|'0'
op|']'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|_get_all_domains
name|'def'
name|'_get_all_domains'
op|'('
name|'cls'
op|','
name|'lobj'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entries'
op|'='
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'CONF'
op|'.'
name|'ldap_dns_base_dn'
op|','
nl|'\n'
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
string|"'(sOARecord=*)'"
op|')'
newline|'\n'
name|'domains'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'entry'
name|'in'
name|'entries'
op|':'
newline|'\n'
indent|' '
name|'domain'
op|'='
name|'entry'
op|'['
number|'1'
op|']'
op|'.'
name|'get'
op|'('
string|"'associatedDomain'"
op|')'
newline|'\n'
name|'if'
name|'domain'
op|':'
newline|'\n'
indent|' '
name|'domains'
op|'.'
name|'append'
op|'('
name|'domain'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
dedent|''
dedent|''
name|'return'
name|'domains'
newline|'\n'
nl|'\n'
DECL|member|_set_tuple
dedent|''
name|'def'
name|'_set_tuple'
op|'('
name|'self'
op|','
name|'tuple'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'ldap_tuple'
op|'='
name|'tuple'
newline|'\n'
nl|'\n'
DECL|member|_qualify
dedent|''
name|'def'
name|'_qualify'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
string|"'%s.%s'"
op|'%'
op|'('
name|'name'
op|','
name|'self'
op|'.'
name|'qualified_domain'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_dequalify
dedent|''
name|'def'
name|'_dequalify'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'z'
op|'='
string|'".%s"'
op|'%'
name|'self'
op|'.'
name|'qualified_domain'
newline|'\n'
name|'if'
name|'name'
op|'.'
name|'endswith'
op|'('
name|'z'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dequalified'
op|'='
name|'name'
op|'['
number|'0'
op|':'
name|'name'
op|'.'
name|'rfind'
op|'('
name|'z'
op|')'
op|']'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"Unable to dequalify. %(name)s is not in "'
nl|'\n'
string|'"%(domain)s.\\n"'
op|')'
op|','
nl|'\n'
op|'{'
string|"'name'"
op|':'
name|'name'
op|','
nl|'\n'
string|"'domain'"
op|':'
name|'self'
op|'.'
name|'qualified_domain'
op|'}'
op|')'
newline|'\n'
name|'dequalified'
op|'='
name|'None'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'dequalified'
newline|'\n'
nl|'\n'
DECL|member|_dn
dedent|''
name|'def'
name|'_dn'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'0'
op|']'
newline|'\n'
DECL|variable|dn
dedent|''
name|'dn'
op|'='
name|'property'
op|'('
name|'_dn'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_rdn
name|'def'
name|'_rdn'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'self'
op|'.'
name|'dn'
op|'.'
name|'partition'
op|'('
string|"','"
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
DECL|variable|rdn
dedent|''
name|'rdn'
op|'='
name|'property'
op|'('
name|'_rdn'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|DomainEntry
dedent|''
name|'class'
name|'DomainEntry'
op|'('
name|'DNSEntry'
op|')'
op|':'
newline|'\n'
nl|'\n'
indent|' '
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|_soa
name|'def'
name|'_soa'
op|'('
name|'cls'
op|')'
op|':'
newline|'\n'
indent|' '
name|'date'
op|'='
name|'time'
op|'.'
name|'strftime'
op|'('
string|"'%Y%m%d%H%M%S'"
op|')'
newline|'\n'
name|'soa'
op|'='
string|"'%s %s %s %s %s %s %s'"
op|'%'
op|'('
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_servers'
op|'['
number|'0'
op|']'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_hostmaster'
op|','
nl|'\n'
name|'date'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_refresh'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_retry'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_expiry'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_soa_minimum'
op|')'
newline|'\n'
name|'return'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'soa'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'classmethod'
newline|'\n'
DECL|member|create_domain
name|'def'
name|'create_domain'
op|'('
name|'cls'
op|','
name|'lobj'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Create a new domain entry, and return an object that wraps it."""'
newline|'\n'
name|'entry'
op|'='
name|'cls'
op|'.'
name|'_get_tuple_for_domain'
op|'('
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'if'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'FloatingIpDNSExists'
op|'('
name|'name'
op|'='
name|'domain'
op|','
name|'domain'
op|'='
string|"''"
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'newdn'
op|'='
string|"'dc=%s,%s'"
op|'%'
op|'('
name|'domain'
op|','
name|'CONF'
op|'.'
name|'ldap_dns_base_dn'
op|')'
newline|'\n'
name|'attrs'
op|'='
op|'{'
string|"'objectClass'"
op|':'
op|'['
string|"'domainrelatedobject'"
op|','
string|"'dnsdomain'"
op|','
nl|'\n'
string|"'domain'"
op|','
string|"'dcobject'"
op|','
string|"'top'"
op|']'
op|','
nl|'\n'
string|"'sOARecord'"
op|':'
op|'['
name|'cls'
op|'.'
name|'_soa'
op|'('
op|')'
op|']'
op|','
nl|'\n'
string|"'associatedDomain'"
op|':'
op|'['
name|'domain'
op|']'
op|','
nl|'\n'
string|"'dc'"
op|':'
op|'['
name|'domain'
op|']'
op|'}'
newline|'\n'
name|'lobj'
op|'.'
name|'add_s'
op|'('
name|'newdn'
op|','
name|'create_modlist'
op|'('
name|'attrs'
op|')'
op|')'
newline|'\n'
name|'return'
name|'DomainEntry'
op|'('
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
nl|'\n'
DECL|member|__init__
dedent|''
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'ldap_object'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'DomainEntry'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
name|'ldap_object'
op|')'
newline|'\n'
name|'entry'
op|'='
name|'self'
op|'.'
name|'_get_tuple_for_domain'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'NotFound'
op|'('
op|')'
newline|'\n'
dedent|''
name|'self'
op|'.'
name|'_set_tuple'
op|'('
name|'entry'
op|')'
newline|'\n'
name|'assert'
op|'('
name|'entry'
op|'['
number|'1'
op|']'
op|'['
string|"'associatedDomain'"
op|']'
op|'['
number|'0'
op|']'
op|'=='
name|'domain'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'qualified_domain'
op|'='
name|'domain'
newline|'\n'
nl|'\n'
DECL|member|delete
dedent|''
name|'def'
name|'delete'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Delete the domain that this entry refers to."""'
newline|'\n'
name|'entries'
op|'='
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
nl|'\n'
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
nl|'\n'
string|"'(aRecord=*)'"
op|')'
newline|'\n'
name|'for'
name|'entry'
name|'in'
name|'entries'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'delete_s'
op|'('
name|'entry'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'delete_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|')'
newline|'\n'
nl|'\n'
DECL|member|update_soa
dedent|''
name|'def'
name|'update_soa'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mlist'
op|'='
op|'['
op|'('
name|'ldap'
op|'.'
name|'MOD_REPLACE'
op|','
string|"'sOARecord'"
op|','
name|'self'
op|'.'
name|'_soa'
op|'('
op|')'
op|')'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modify_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
name|'mlist'
op|')'
newline|'\n'
nl|'\n'
DECL|member|subentry_with_name
dedent|''
name|'def'
name|'subentry_with_name'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entry'
op|'='
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
nl|'\n'
string|"'(associatedDomain=%s.%s)'"
op|'%'
nl|'\n'
op|'('
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'name'
op|')'
op|','
nl|'\n'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'self'
op|'.'
name|'qualified_domain'
op|')'
op|')'
op|')'
newline|'\n'
name|'if'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'HostEntry'
op|'('
name|'self'
op|','
name|'entry'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'None'
newline|'\n'
nl|'\n'
DECL|member|subentries_with_ip
dedent|''
dedent|''
name|'def'
name|'subentries_with_ip'
op|'('
name|'self'
op|','
name|'ip'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entries'
op|'='
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'search_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
name|'ldap'
op|'.'
name|'SCOPE_SUBTREE'
op|','
nl|'\n'
string|"'(aRecord=%s)'"
op|'%'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'ip'
op|')'
op|')'
newline|'\n'
name|'objs'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'entry'
name|'in'
name|'entries'
op|':'
newline|'\n'
indent|' '
name|'if'
string|"'associatedDomain'"
name|'in'
name|'entry'
op|'['
number|'1'
op|']'
op|':'
newline|'\n'
indent|' '
name|'objs'
op|'.'
name|'append'
op|'('
name|'HostEntry'
op|'('
name|'self'
op|','
name|'entry'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'return'
name|'objs'
newline|'\n'
nl|'\n'
DECL|member|add_entry
dedent|''
name|'def'
name|'add_entry'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'address'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'self'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'FloatingIpDNSExists'
op|'('
name|'name'
op|'='
name|'name'
op|','
nl|'\n'
name|'domain'
op|'='
name|'self'
op|'.'
name|'qualified_domain'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'entries'
op|'='
name|'self'
op|'.'
name|'subentries_with_ip'
op|'('
name|'address'
op|')'
newline|'\n'
name|'if'
name|'entries'
op|':'
newline|'\n'
comment|'# We already have an ldap entry for this IP, so we just'
nl|'\n'
comment|'# need to add the new name.'
nl|'\n'
indent|' '
name|'existingdn'
op|'='
name|'entries'
op|'['
number|'0'
op|']'
op|'.'
name|'dn'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modify_s'
op|'('
name|'existingdn'
op|','
op|'['
op|'('
name|'ldap'
op|'.'
name|'MOD_ADD'
op|','
nl|'\n'
string|"'associatedDomain'"
op|','
nl|'\n'
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'self'
op|'.'
name|'_qualify'
op|'('
name|'name'
op|')'
op|')'
op|')'
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'return'
name|'self'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
comment|'# We need to create an entirely new entry.'
nl|'\n'
indent|' '
name|'newdn'
op|'='
string|"'dc=%s,%s'"
op|'%'
op|'('
name|'name'
op|','
name|'self'
op|'.'
name|'dn'
op|')'
newline|'\n'
name|'attrs'
op|'='
op|'{'
string|"'objectClass'"
op|':'
op|'['
string|"'domainrelatedobject'"
op|','
string|"'dnsdomain'"
op|','
nl|'\n'
string|"'domain'"
op|','
string|"'dcobject'"
op|','
string|"'top'"
op|']'
op|','
nl|'\n'
string|"'aRecord'"
op|':'
op|'['
name|'address'
op|']'
op|','
nl|'\n'
string|"'associatedDomain'"
op|':'
op|'['
name|'self'
op|'.'
name|'_qualify'
op|'('
name|'name'
op|')'
op|']'
op|','
nl|'\n'
string|"'dc'"
op|':'
op|'['
name|'name'
op|']'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'add_s'
op|'('
name|'newdn'
op|','
name|'create_modlist'
op|'('
name|'attrs'
op|')'
op|')'
newline|'\n'
name|'return'
name|'self'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
nl|'\n'
DECL|member|remove_entry
dedent|''
dedent|''
name|'def'
name|'remove_entry'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'entry'
op|'='
name|'self'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'entry'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'NotFound'
op|'('
op|')'
newline|'\n'
dedent|''
name|'entry'
op|'.'
name|'remove_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'update_soa'
op|'('
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|HostEntry
dedent|''
dedent|''
name|'class'
name|'HostEntry'
op|'('
name|'DNSEntry'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'parent'
op|','
name|'tuple'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'HostEntry'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
name|'parent'
op|'.'
name|'lobj'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'parent_entry'
op|'='
name|'parent'
newline|'\n'
name|'self'
op|'.'
name|'_set_tuple'
op|'('
name|'tuple'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'qualified_domain'
op|'='
name|'parent'
op|'.'
name|'qualified_domain'
newline|'\n'
nl|'\n'
DECL|member|remove_name
dedent|''
name|'def'
name|'remove_name'
op|'('
name|'self'
op|','
name|'name'
op|')'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'='
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'1'
op|']'
op|'['
string|"'associatedDomain'"
op|']'
newline|'\n'
name|'if'
name|'not'
name|'names'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'NotFound'
op|'('
op|')'
newline|'\n'
dedent|''
name|'if'
name|'len'
op|'('
name|'names'
op|')'
op|'>'
number|'1'
op|':'
newline|'\n'
comment|'# We just have to remove the requested domain.'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modify_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
op|'['
op|'('
name|'ldap'
op|'.'
name|'MOD_DELETE'
op|','
string|"'associatedDomain'"
op|','
nl|'\n'
name|'self'
op|'.'
name|'_qualify'
op|'('
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'name'
op|')'
op|')'
op|')'
op|']'
op|')'
newline|'\n'
name|'if'
op|'('
name|'self'
op|'.'
name|'rdn'
op|'['
number|'1'
op|']'
op|'=='
name|'name'
op|')'
op|':'
newline|'\n'
comment|'# We just removed the rdn, so we need to move this entry.'
nl|'\n'
indent|' '
name|'names'
op|'.'
name|'remove'
op|'('
name|'self'
op|'.'
name|'_qualify'
op|'('
name|'name'
op|')'
op|')'
newline|'\n'
name|'newrdn'
op|'='
string|"'dc=%s'"
op|'%'
name|'self'
op|'.'
name|'_dequalify'
op|'('
name|'names'
op|'['
number|'0'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modrdn_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
op|'['
name|'newrdn'
op|']'
op|')'
newline|'\n'
dedent|''
dedent|''
name|'else'
op|':'
newline|'\n'
comment|'# We should delete the entire record.'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'delete_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|')'
newline|'\n'
nl|'\n'
DECL|member|modify_address
dedent|''
dedent|''
name|'def'
name|'modify_address'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'address'
op|')'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'='
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'1'
op|']'
op|'['
string|"'associatedDomain'"
op|']'
newline|'\n'
name|'if'
name|'not'
name|'names'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'NotFound'
op|'('
op|')'
newline|'\n'
dedent|''
name|'if'
name|'len'
op|'('
name|'names'
op|')'
op|'=='
number|'1'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'modify_s'
op|'('
name|'self'
op|'.'
name|'dn'
op|','
op|'['
op|'('
name|'ldap'
op|'.'
name|'MOD_REPLACE'
op|','
string|"'aRecord'"
op|','
nl|'\n'
op|'['
name|'utils'
op|'.'
name|'utf8'
op|'('
name|'address'
op|')'
op|']'
op|')'
op|']'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'remove_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'parent'
op|'.'
name|'add_entry'
op|'('
name|'name'
op|','
name|'address'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_names
dedent|''
dedent|''
name|'def'
name|'_names'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'domain'
name|'in'
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'1'
op|']'
op|'['
string|"'associatedDomain'"
op|']'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'.'
name|'append'
op|'('
name|'self'
op|'.'
name|'_dequalify'
op|'('
name|'domain'
op|')'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'names'
newline|'\n'
DECL|variable|names
dedent|''
name|'names'
op|'='
name|'property'
op|'('
name|'_names'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_ip
name|'def'
name|'_ip'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ip'
op|'='
name|'self'
op|'.'
name|'ldap_tuple'
op|'['
number|'1'
op|']'
op|'['
string|"'aRecord'"
op|']'
op|'['
number|'0'
op|']'
newline|'\n'
name|'return'
name|'ip'
newline|'\n'
DECL|variable|ip
dedent|''
name|'ip'
op|'='
name|'property'
op|'('
name|'_ip'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_parent
name|'def'
name|'_parent'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'self'
op|'.'
name|'parent_entry'
newline|'\n'
DECL|variable|parent
dedent|''
name|'parent'
op|'='
name|'property'
op|'('
name|'_parent'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|LdapDNS
dedent|''
name|'class'
name|'LdapDNS'
op|'('
name|'dns_driver'
op|'.'
name|'DNSDriver'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Driver for PowerDNS using ldap as a back end.\n\n This driver assumes ldap-method=strict, with all domains\n in the top-level, aRecords only.\n """'
newline|'\n'
nl|'\n'
DECL|member|__init__
name|'def'
name|'__init__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'not'
name|'ldap'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'ImportError'
op|'('
name|'_'
op|'('
string|"'ldap not installed'"
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'lobj'
op|'='
name|'ldap'
op|'.'
name|'initialize'
op|'('
name|'CONF'
op|'.'
name|'ldap_dns_url'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'lobj'
op|'.'
name|'simple_bind_s'
op|'('
name|'CONF'
op|'.'
name|'ldap_dns_user'
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'ldap_dns_password'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_domains
dedent|''
name|'def'
name|'get_domains'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'DomainEntry'
op|'.'
name|'_get_all_domains'
op|'('
name|'self'
op|'.'
name|'lobj'
op|')'
newline|'\n'
nl|'\n'
DECL|member|create_entry
dedent|''
name|'def'
name|'create_entry'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'address'
op|','
name|'type'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'type'
op|'.'
name|'lower'
op|'('
op|')'
op|'!='
string|"'a'"
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InvalidInput'
op|'('
name|'_'
op|'('
string|'"This driver only supports "'
nl|'\n'
string|'"type \'a\' entries."'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'dEntry'
op|'.'
name|'add_entry'
op|'('
name|'name'
op|','
name|'address'
op|')'
newline|'\n'
nl|'\n'
DECL|member|delete_entry
dedent|''
name|'def'
name|'delete_entry'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'dEntry'
op|'.'
name|'remove_entry'
op|'('
name|'name'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_entries_by_address
dedent|''
name|'def'
name|'get_entries_by_address'
op|'('
name|'self'
op|','
name|'address'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'NotFound'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
op|']'
newline|'\n'
dedent|''
name|'entries'
op|'='
name|'dEntry'
op|'.'
name|'subentries_with_ip'
op|'('
name|'address'
op|')'
newline|'\n'
name|'names'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'entry'
name|'in'
name|'entries'
op|':'
newline|'\n'
indent|' '
name|'names'
op|'.'
name|'extend'
op|'('
name|'entry'
op|'.'
name|'names'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'names'
newline|'\n'
nl|'\n'
DECL|member|get_entries_by_name
dedent|''
name|'def'
name|'get_entries_by_name'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'NotFound'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
op|']'
newline|'\n'
dedent|''
name|'nEntry'
op|'='
name|'dEntry'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'if'
name|'nEntry'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'nEntry'
op|'.'
name|'ip'
op|']'
newline|'\n'
nl|'\n'
DECL|member|modify_address
dedent|''
dedent|''
name|'def'
name|'modify_address'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'address'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'nEntry'
op|'='
name|'dEntry'
op|'.'
name|'subentry_with_name'
op|'('
name|'name'
op|')'
newline|'\n'
name|'nEntry'
op|'.'
name|'modify_address'
op|'('
name|'name'
op|','
name|'address'
op|')'
newline|'\n'
nl|'\n'
DECL|member|create_domain
dedent|''
name|'def'
name|'create_domain'
op|'('
name|'self'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'DomainEntry'
op|'.'
name|'create_domain'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
nl|'\n'
DECL|member|delete_domain
dedent|''
name|'def'
name|'delete_domain'
op|'('
name|'self'
op|','
name|'domain'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dEntry'
op|'='
name|'DomainEntry'
op|'('
name|'self'
op|'.'
name|'lobj'
op|','
name|'domain'
op|')'
newline|'\n'
name|'dEntry'
op|'.'
name|'delete'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|delete_dns_file
dedent|''
name|'def'
name|'delete_dns_file'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"This shouldn\'t be getting called except during "'
nl|'\n'
string|'"testing."'
op|')'
op|')'
newline|'\n'
name|'pass'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 12.044473 | 174 | 0.578276 |
f81075d9a768c275f1cbe075abbbe7e3dce2e3c6 | 2,554 | py | Python | src/weekly_contest_251/1946_largest-number-after-mutating-substring.py | dongminlee94/leetcode-practice | 4d33816d66df8ab447087a04b76008f6bec51f23 | ["MIT"] | null | null | null | src/weekly_contest_251/1946_largest-number-after-mutating-substring.py | dongminlee94/leetcode-practice | 4d33816d66df8ab447087a04b76008f6bec51f23 | ["MIT"] | null | null | null | src/weekly_contest_251/1946_largest-number-after-mutating-substring.py | dongminlee94/leetcode-practice | 4d33816d66df8ab447087a04b76008f6bec51f23 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
1946. Largest Number After Mutating Substring
https://leetcode.com/problems/largest-number-after-mutating-substring/
Example 1:
Input: num = "132", change = [9,8,5,0,3,6,4,2,6,8]
Output: "832"
Explanation: Replace the substring "1":
- 1 maps to change[1] = 8.
Thus, "132" becomes "832".
"832" is the largest number that can be created, so return it.
Example 2:
Input: num = "021", change = [9,4,3,5,7,2,1,9,0,6]
Output: "934"
Explanation: Replace the substring "021":
- 0 maps to change[0] = 9.
- 2 maps to change[2] = 3.
- 1 maps to change[1] = 4.
Thus, "021" becomes "934".
"934" is the largest number that can be created, so return it.
Example 3:
Input: num = "5", change = [1,4,7,5,3,2,5,6,9,4]
Output: "5"
Explanation: "5" is already the largest number that can be created, so return it.
"""
from typing import List
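# The solution body is not shown in this cell. One standard greedy approach, sketched here
# for illustration (not necessarily the author's version): mutate a single contiguous run
# of digits, starting at the first digit that strictly improves, and stop once a mutation
# would make a digit smaller.
class Solution:
    def maximumNumber(self, num: str, change: List[int]) -> str:
        digits = list(num)
        i = 0
        # skip digits that would not improve
        while i < len(digits) and change[int(digits[i])] <= int(digits[i]):
            i += 1
        # mutate while the replacement is not worse
        while i < len(digits) and change[int(digits[i])] >= int(digits[i]):
            digits[i] = str(change[int(digits[i])])
            i += 1
        return "".join(digits)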
| 30.047059 | 98 | 0.523101 |
f810c90f204568fd67dca941d0e6266424f5517d | 261 | py | Python | test/unit/bot/test_bot.py | kubicki14/BurtTheCardKing | d0854ea08ffdffee687830097b0645069e263d9d | ["MIT"] | null | null | null | test/unit/bot/test_bot.py | kubicki14/BurtTheCardKing | d0854ea08ffdffee687830097b0645069e263d9d | ["MIT"] | 1 | 2020-03-04T04:39:20.000Z | 2020-03-04T04:39:20.000Z | test/unit/bot/test_bot.py | kubicki14/BurtTheCardKing | d0854ea08ffdffee687830097b0645069e263d9d | ["MIT"] | null | null | null |
import pytest
from bot.bot import Bot
| 20.076923 | 75 | 0.659004 |
f8125fde876b086f96371a2951d0cf190eba3f48 | 1,437 | py | Python | Hackerrank/30DaysOfCode/Day9-Recursion3.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | ["MIT"] | null | null | null | Hackerrank/30DaysOfCode/Day9-Recursion3.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | ["MIT"] | null | null | null | Hackerrank/30DaysOfCode/Day9-Recursion3.py | eduardormonteiro/PythonPersonalLibrary | 561733bb8305c4e25a08f99c28b60ec77251ad67 | ["MIT"] | null | null | null |
"""
Hackerrank
Day 9: Recursion 3
https://www.hackerrank.com/challenges/30-recursion/problem?h_r=email&unlock_token=bc6d5f3963afb26ed0b2f69c3f4f3ddb1826e1b2&utm_campaign=30_days_of_code_continuous&utm_medium=email&utm_source=daily_reminder
Objective
Today, we are learning about an algorithmic concept called recursion. Check out the Tutorial tab for learning materials and an instructional video.
Recursive Method for Calculating Factorial
Function Description
Complete the factorial function in the editor below. Be sure to use recursion.
factorial has the following parameter:
int n: an integer
Returns
int: the factorial of n
Note: If you fail to use recursion or fail to name your recursive function factorial or Factorial, you will get a score of 0.
Input Format
A single integer, n (the argument to pass to factorial).
Constraints
Your submission must contain a recursive function named factorial.
Sample Input
3
Sample Output
6
Explanation
Consider the following steps. After the recursive calls from step 1 to 3, results are accumulated from step 3 to 1.
"""
import math
import os
import random
import re
import sys
# Complete the factorial function below.
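# A standard recursive implementation (the original solution body is not shown in this cell):
def factorial(n):
    # base case: 0! = 1! = 1
    if n <= 1:
        return 1
    # recursive step: n! = n * (n - 1)!
    return n * factorial(n - 1)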
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
result = factorial(n)
fptr.write(str(result) + '\n')
fptr.close()
| 22.809524 | 205 | 0.76618 |
f8126dd049c2dae8dffb8bb81f37f683297e9ca7 | 408 | py | Python | todolist/main/models.py | gda2048/TODOlist | cc6c359ab0a8d2f43ed82b19dfc0eb5d640f8b9f | ["MIT"] | 1 | 2019-12-19T19:04:02.000Z | 2019-12-19T19:04:02.000Z | todolist/main/models.py | gda2048/TODOlist | cc6c359ab0a8d2f43ed82b19dfc0eb5d640f8b9f | ["MIT"] | 5 | 2020-02-12T02:57:13.000Z | 2021-12-13T20:02:16.000Z | todolist/main/models.py | gda2048/TODOlist | cc6c359ab0a8d2f43ed82b19dfc0eb5d640f8b9f | ["MIT"] | null | null | null |
from django.db import models
| 29.142857 | 68 | 0.70098 |
f812c1ff23e3b82b8ed9c4bca10c6b857649c53a | 2,358 | py | Python | src/qbrobot/util/log.py | jucuguru/crypto-robot-basic | 3addaaff9fb2f41d8e9dcd66bae7ae7f75216704 | ["BSD-2-Clause"] | null | null | null | src/qbrobot/util/log.py | jucuguru/crypto-robot-basic | 3addaaff9fb2f41d8e9dcd66bae7ae7f75216704 | ["BSD-2-Clause"] | null | null | null | src/qbrobot/util/log.py | jucuguru/crypto-robot-basic | 3addaaff9fb2f41d8e9dcd66bae7ae7f75216704 | ["BSD-2-Clause"] | null | null | null |
import logging
from qbrobot import qsettings
try :
from util import send_dingding
except ImportError:
DINGDING_CANUSE = False
else:
DINGDING_CANUSE = True
"""
class DingDingLogger
passes all args through to the underlying logger method, and also calls dingding.send_msg()
1. debug messages are not sent to dingding.
2. only send_msg(message) is supported, so multiple args cannot be passed on.
"""
"""
handler = logging.handlers.RotatingFileHandler(str(logFile) + '.LOG', maxBytes = 1024 * 1024 * 500, backupCount = 5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger(str(logFile))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
"""
| 25.912088 | 116 | 0.651399 |
f81309425c4d43dc4fcef12218a6de6d14c72768 | 722 | py | Python | Country cleaning/Chile/PRT/OfflineRB.py | Demonliquid/cars-python-cleaning | 91c516a33c4522114dc024cfaf04f1c1d594f973 | ["MIT"] | null | null | null | Country cleaning/Chile/PRT/OfflineRB.py | Demonliquid/cars-python-cleaning | 91c516a33c4522114dc024cfaf04f1c1d594f973 | ["MIT"] | null | null | null | Country cleaning/Chile/PRT/OfflineRB.py | Demonliquid/cars-python-cleaning | 91c516a33c4522114dc024cfaf04f1c1d594f973 | ["MIT"] | null | null | null |
# %%
import os
import pandas as pd
import numpy as np
import datetime
# %% DATA LOADING
path = r'F:\Trabajo\Promotive\Chile\PRT\7\CSV\3'
os.chdir(path)
files = os.listdir(path)
files
# %%
files_xls = [f for f in files if f[-3:] == 'csv']
files_xls
# %%
columnas = ['PPU', 'MARCA', 'MODELO', 'ANO_FABRICACION', 'NUM_MOTOR', 'NUM_CHASIS', 'VIN']
chile = pd.DataFrame(columns=columnas)
# %%
for f in files_xls:
data = pd.read_csv(f, sep=";", encoding="latin-1")
chile = pd.concat([chile , data], ignore_index=True, join='outer')
# %%
chile = chile[columnas]
# %%
chile.drop_duplicates(subset="PPU", inplace=True)
# %%
chile.to_csv(r'F:\Trabajo\Promotive\Chile\PRT\Limpio\OfflineRB3.csv')
# %%
chile
# %%
| 17.609756 | 90 | 0.65651 |
f815471c4b7feac192ccd8f44032afcd4c9605be | 3,850 | py | Python | datasets/lfw_crop.py | laoreja/face-identity-transformer | 5569d93017ad9371deae7e2b35564523c64b501e | ["BSD-3-Clause"] | 13 | 2020-10-09T07:15:02.000Z | 2022-03-28T20:51:30.000Z | datasets/lfw_crop.py | laoreja/face-identity-transformer | 5569d93017ad9371deae7e2b35564523c64b501e | ["BSD-3-Clause"] | 2 | 2021-03-03T15:04:51.000Z | 2021-06-02T03:42:03.000Z | datasets/lfw_crop.py | laoreja/face-identity-transformer | 5569d93017ad9371deae7e2b35564523c64b501e | ["BSD-3-Clause"] | 5 | 2021-03-02T11:44:19.000Z | 2021-07-09T16:42:02.000Z |
import os.path as osp
import numpy as np
from PIL import Image
import torch.utils.data as data
import torch
__all__ = ['LFW_CROP']
EXTENSION_FACTOR = 2
| 37.745098 | 114 | 0.540779 |
f816945723bd501f06ebbe8199fa11cd256a3a52 | 1,065 | py | Python | test.py | JFF-Bohdan/pyimei | d881f4a11374d29828867e2de397d1fcc8413d25 | ["MIT"] | 1 | 2021-07-29T17:39:34.000Z | 2021-07-29T17:39:34.000Z | test.py | JFF-Bohdan/pyimei | d881f4a11374d29828867e2de397d1fcc8413d25 | ["MIT"] | null | null | null | test.py | JFF-Bohdan/pyimei | d881f4a11374d29828867e2de397d1fcc8413d25 | ["MIT"] | 3 | 2018-08-07T08:01:01.000Z | 2020-03-24T17:14:31.000Z |
from pyimei import ImeiSupport
#testing classes
ImeiSupport.test()
valid_imeis = [
356938035643809,
490154203237518,
"356938035643809"
]
invalid_imeis = [
358065019104263,
"357805023984941",
356938035643801
]
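# checkImeisArray() is not shown in this cell. A minimal sketch, assuming ImeiSupport
# exposes an isValid() predicate:
def checkImeisArray(imeis):
    for imei in imeis:
        print("\tIMEI {} valid: {}".format(imei, ImeiSupport.isValid(imei)))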
checkImeisArray(valid_imeis)
checkImeisArray(invalid_imeis)
print("Generating independent FAKE imeis...")
RANDOM_IMEIS_QTY = 5
for i in range(RANDOM_IMEIS_QTY):
print("\tfake IMEI[{}] = {}".format(i+1, ImeiSupport.generateNew()))
print("Generating sequental FAKE imeis:")
DEP_RANDOM_IMEIS_QTY = 5
startImei = ImeiSupport.generateNew()
currentImei = startImei
print("start IMEI: {}".format(startImei))
for i in range(DEP_RANDOM_IMEIS_QTY):
currentImei = ImeiSupport.next(currentImei)
print("\tfake IMEI[{}] = {}".format(i+1, currentImei))
print("DONE")
| 23.152174 | 73 | 0.66385 |
f816d939ecc6c4f196c356dcf81afa3b4caf0b94 | 2,175 | py | Python | aioauth/responses.py | grmnz/aioauth | e69c989bc81284d60798599816c39ff91074a24b | ["MIT"] | null | null | null | aioauth/responses.py | grmnz/aioauth | e69c989bc81284d60798599816c39ff91074a24b | ["MIT"] | null | null | null | aioauth/responses.py | grmnz/aioauth | e69c989bc81284d60798599816c39ff91074a24b | ["MIT"] | null | null | null |
"""
.. code-block:: python
from aioauth import responses
Response objects used throughout the project.
----
"""
from dataclasses import dataclass, field
from http import HTTPStatus
from typing import Dict
from .collections import HTTPHeaderDict
from .constances import default_headers
from .types import ErrorType, TokenType
| 20.518868 | 111 | 0.722299 |
f818d292ca6f1460d6aa1027f16f35e13ba6829c
| 5,441 |
py
|
Python
|
fipomdp/experiments/NYC_experiment.py
|
xbrlej/FiPOMDP
|
b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f
|
[
"MIT"
] | null | null | null |
fipomdp/experiments/NYC_experiment.py
|
xbrlej/FiPOMDP
|
b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f
|
[
"MIT"
] | null | null | null |
fipomdp/experiments/NYC_experiment.py
|
xbrlej/FiPOMDP
|
b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import logging
import platform
import time
from functools import partial
from statistics import stdev
from typing import List, Tuple, Dict, Union, Any
import psutil
from joblib import Parallel, delayed
from fimdp.objectives import BUCHI
from fipomdp import ConsPOMDP
from fipomdp.energy_solvers import ConsPOMDPBasicES
from fipomdp.experiments.NYC_environment import NYCPOMDPEnvironment
from fipomdp.experiments.UUV_experiment import simulate_observation
from fipomdp.pomcp import OnlineStrategy
from fipomdp.rollout_functions import basic, grid_manhattan_distance, product, consumption_based
if __name__ == "__main__":
main()
| 33.58642 | 147 | 0.695093 |
f818e9acfc35ef6a4d51efdba0e1aa6dcf47703d
| 399 |
py
|
Python
|
examples/connect_to_wifi.py
|
flaiming/TechFurMeet-Micropython
|
00ff427429dfc186e33aa5e77bafe39eb820b854
|
[
"MIT"
] | 1 |
2018-01-19T12:05:32.000Z
|
2018-01-19T12:05:32.000Z
|
examples/connect_to_wifi.py
|
flaiming/TechFurMeet-Micropython
|
00ff427429dfc186e33aa5e77bafe39eb820b854
|
[
"MIT"
] | null | null | null |
examples/connect_to_wifi.py
|
flaiming/TechFurMeet-Micropython
|
00ff427429dfc186e33aa5e77bafe39eb820b854
|
[
"MIT"
] | null | null | null |
import network
import time
# deactivate AP
ap = network.WLAN(network.AP_IF)
ap.active(False)
# activate static network
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
# connect to local WIFI
wlan.connect('TFM-Attendees')
# wait until connected
while not wlan.isconnected():
print('connecting...')
time.sleep(1)
print('Connected!')
print('Current network config:', wlan.ifconfig())
| 19 | 49 | 0.736842 |
f8197ad55d7f3b5e1e727b66b9aaef3047efa623
| 3,317 |
py
|
Python
|
hikcamerabot/services/tasks/video.py
|
CamVipQ/hikvision-camera-bot
|
84afa0a4dc2fc1ebda71b5020520dc1c300cf3b2
|
[
"MIT"
] | 44 |
2019-03-07T00:25:44.000Z
|
2022-02-20T15:57:11.000Z
|
hikcamerabot/services/tasks/video.py
|
CamVipQ/hikvision-camera-bot
|
84afa0a4dc2fc1ebda71b5020520dc1c300cf3b2
|
[
"MIT"
] | 25 |
2019-02-17T13:37:27.000Z
|
2022-03-22T16:11:46.000Z
|
hikcamerabot/services/tasks/video.py
|
CamVipQ/hikvision-camera-bot
|
84afa0a4dc2fc1ebda71b5020520dc1c300cf3b2
|
[
"MIT"
] | 14 |
2019-06-28T05:40:10.000Z
|
2022-03-24T08:05:01.000Z
|
import asyncio
import logging
import os
import time
from addict import Addict
from aiogram.types import Message
from hikcamerabot.config.config import get_result_queue
from hikcamerabot.constants import Event, VideoGifType
from hikcamerabot.utils.utils import format_ts, gen_random_str
| 35.666667 | 88 | 0.621948 |
f81adf96e79c10244b5314e809ea884419299412
| 71,349 |
py
|
Python
|
HyperOXO/hypercube.py
|
drtjc/Hyper
|
83579186d915de603d27b8757dfc5a0f82c6770e
|
[
"MIT"
] | null | null | null |
HyperOXO/hypercube.py
|
drtjc/Hyper
|
83579186d915de603d27b8757dfc5a0f82c6770e
|
[
"MIT"
] | null | null | null |
HyperOXO/hypercube.py
|
drtjc/Hyper
|
83579186d915de603d27b8757dfc5a0f82c6770e
|
[
"MIT"
] | null | null | null |
""" Provides functionalilty for working with celled hypercubes.
Hypercubes are extensions of lines, squares and cubes into higher
dimensions. Celled hypercubes can be thought of as a grid or lattice
structure. From this point, hypercubes is used to mean celled
hypercubes.
A hypercube can be described by its dimension and the number of
cells in any dimension. We denote this as h(d, n).
For example: h(2, 3) is a 3x3 grid; h(3, 4) is a 4x4x4 lattice.
A hypercube of dimension d may also be referred to as a d-cube.
A cell's position can be specified in coordinate style.
For example, given h(3, 4) and an agreed ordering of dimensions
then some valid coordinates are (1,1,1), (2,1,3) and (4,4,4).
The term m-agonal is a short for "m-dimensional diagonal" and can be
thought of as a line of contiguous cells that span m dimensions.
For example, in a 3-cube you would find many 1-agonals, 2-agonals and
3-agonals. A 1-agonal is customarily known as a row, column or pillar.
In another example, if a line of contiguous cells in a 5-cube has the
property that 3 coordinates change, while the others remain constant,
these cells constitute a 3-agonal.
For a given h(d, n), 1 <= m <= d, an m-agonal always has n cells.
The term line is used to refer to any m-agonal in general.
A cell apppears in multiple lines, which are refered to as the
scope of the cell, or the scoped lines of the cell.
The combination of lines and scopes is referred to as the structure
of the hypercube.
For a given cell, we define its connected cells as those cells that
appear in the scoped lines of the given cell.
We define a slice as a sub-cube of a hypercube. For example,
consder h(2,3), a 3x3 hypercube. Let the dimensions be denoted as
d1 and d2, respectively, where 1 <= d1, d2 <= 3.
If we consider d1 as rows, and d2 as columns, then the slice that is
the first column is defined by d1 = 1, 2, 3, and d2 = 1. This has the
form h(1, 3).
The slice that is the top left 2x2 corner is defined by d1, d2 = 1, 2.
This has the form h(2, 2).
This module essentially has 2 classes of functions:
1. Those that use a numpy ndarray to implement the underlying
hypercube. These functions have the suffix _np. An array of d dimensions
may be referred to as a d-array.
2. Those that do not implement the underlying hypercube but
provide information as coordinates that can be used with
a user-implementation of the hypercube. These functions have
the suffix _coord.
########################################################################
Type annotations are used in this module. In addition to the standard
types defined in the typing module, several aliases are also defined
which can be viewed in the source code.
"""
# numpy (and scipy) don't yet have type annotations
import numpy as np # type: ignore
from scipy.special import comb # type: ignore
import itertools as it
import numbers
import re
from typing import List, Callable, Union, Collection, Tuple, Any, Type, Deque
from typing import DefaultDict, TypeVar, Counter, Dict, Iterable, Generator, Sequence
Cell_coord = Tuple[int, ...]
Cube_np = TypeVar('Cube_np', np.ndarray, np.ndarray) # Cube_np should really be a numpy array representing h(d, n)
Line_np = TypeVar('Line_np', np.ndarray, np.ndarray) # Line_np should really be a 1d numpy array with n elements
Line_coord = List[Cell_coord]
Lines_np = List[Line_np]
Lines_enum_np = Dict[int, Line_np]
Lines_coord = List[Line_coord]
Lines_enum_coord = Dict[int, Line_coord]
Scopes_np = DefaultDict[Cell_coord, Lines_np]
Scopes_coord = DefaultDict[Cell_coord, Lines_coord]
Scopes_enum = DefaultDict[Cell_coord, List[int]]
Scopes = Union[Scopes_np, Scopes_coord, Scopes_enum]
Structure_np = Tuple[Cube_np, Lines_np, Scopes_np]
Structure_enum_np = Tuple[Cube_np, Lines_enum_np, Scopes_enum]
Structure_coord = Tuple[Lines_coord, Scopes_coord]
Structure_enum_coord = Tuple[Lines_enum_coord, Scopes_enum]
Connected_cells = DefaultDict[Cell_coord, List[Cell_coord]]
def num_lines(d: int, n: int) -> int:
"""
num_lines(d: int, n: int) -> int:
Calculate the number of lines in a hypercube.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
The number of lines in a hypercube.
See Also
--------
num_lines_grouped
Notes
-----
There are two ways to calculate the number of lines:
1. Call the function num_lines_grouped and sum the number of lines
spanning each dimension.
2. Directly, using the formula:
((n+2)**d-n**d)/2
Sketch of proof:
Embed the n**d hypercube in an (n+2)**d hypercube which extends one
cell further in each dimension. Then each winning line in the n**d
hypercube terminates in exactly two "border" cells of the enlarged
hypercube, and these two borders are unique to that line. Moreover,
every border cell is at the end of a line, so that (n+2)**d border
cells are in two-to-one correspondence with the winning lines.
(See Hypercube -Tic-Tac-Toe: Solomon W.Golomb and Alfred W. Hales)
Examples
--------
>>> num_lines(2, 3)
8
>>> num_lines(3, 4)
76
"""
# return sum(list(num_lines_grouped(d, n)))
return int(((n+2)**d-n**d)/2)
def get_scopes_np(lines: Lines_np, d: int) -> Scopes_np:
"""
get_scopes_np(lines: Lines_np, d: int) -> Scopes_np:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_np(hc) where hc is of the
        form np.arange(n ** d, dtype = intxx).reshape([n] * d).
That is, hc is populated with the values 0,1,2,...,n^d - 1.
    d
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to the coordinates of each cell in
the hypercube. For each cell key, the value is the cell's
scope - a list of numpy.ndarray views that are lines containing
the cell.
See Also
--------
get_lines_np
Notes
-----
The implementation of this function uses np.unravel_index, and
    relies upon the lines parameter being generated from an array
populated with values 0,1,2,...
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_np(hc))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]),
array([0, 3]), array([2, 1])]
>>> scopes = get_scopes_np(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 2]), array([0, 1]), array([0, 3])],
(0, 1): [array([1, 3]), array([0, 1]), array([2, 1])],
(1, 0): [array([0, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([0, 3])]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 2]), array([0, 1]), array([0, 3])]),
((0, 1), [array([1, 3]), array([0, 1]), array([2, 1])]),
((1, 0), [array([0, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([0, 3])])]
>>> hc[0, 0] = 99
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([99, 2]), array([99, 1]), array([99, 3])],
(0, 1): [array([1, 3]), array([99, 1]), array([2, 1])],
(1, 0): [array([99, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([99, 3])]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([99, 2]), array([99, 1]), array([99, 3])]),
((0, 1), [array([1, 3]), array([99, 1]), array([2, 1])]),
((1, 0), [array([99, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([99, 3])])]
"""
n = lines[0].size
shape = [n] * d
scopes: Scopes_np = DefaultDict(list)
for line in lines:
for j in range(n):
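            # cell values equal their flat indices, so unravel them back into coordinates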
cell = np.unravel_index(line[j], shape)
scopes[cell].append(line)
return scopes
def structure_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_np:
"""
structure_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) ->
Structure_np:
Return a hypercube, its lines, and the scopes of its cells.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
zeros
If true, all values in array are 0, else they are 0,1,2,...
OFFSET
        The number of cells is n^d. If this is greater than
(2^31 - OFFSET - 1) then we use np.int64 (instead of np.int32)
as the dtype of the numpy array.
Returns
-------
The hypercube (as a numpy array), its lines, and the scopes of
its cells.
See Also
--------
get_lines_np
get_scopes_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> struct = structure_np(2, 2)
>>> struct[0]
array([[0, 0],
[0, 0]])
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
[array([0, 0]), array([0, 0]), array([0, 0]), array([0, 0]),
array([0, 0]), array([0, 0])]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 0]), array([0, 0]), array([0, 0])],
(0, 1): [array([0, 0]), array([0, 0]), array([0, 0])],
(1, 0): [array([0, 0]), array([0, 0]), array([0, 0])],
(1, 1): [array([0, 0]), array([0, 0]), array([0, 0])]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 0]), array([0, 0]), array([0, 0])]),
((0, 1), [array([0, 0]), array([0, 0]), array([0, 0])]),
((1, 0), [array([0, 0]), array([0, 0]), array([0, 0])]),
((1, 1), [array([0, 0]), array([0, 0]), array([0, 0])])]
>>> struct = structure_np(2, 2, False)
>>> struct[0]
array([[0, 1],
[2, 3]])
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]),
array([0, 3]), array([2, 1])]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 2]), array([0, 1]), array([0, 3])],
(0, 1): [array([1, 3]), array([0, 1]), array([2, 1])],
(1, 0): [array([0, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([0, 3])]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 2]), array([0, 1]), array([0, 3])]),
((0, 1), [array([1, 3]), array([0, 1]), array([2, 1])]),
((1, 0), [array([0, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([0, 3])])]
"""
    # number of cells is n^d. If this is greater than (2^31 - OFFSET - 1)
# then we use int64. This is because the get_scopes
# function populates the arrays with values 0,1,2, ...
dtype = np.int64 if n ** d > 2 ** 31 - OFFSET - 1 else np.int32
hc = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines = list(get_lines_np(hc))
scopes = get_scopes_np(lines, d)
if zeros:
hc.fill(0)
return (hc, lines, scopes)
def get_lines_enum_np(hc: Cube_np) -> Lines_enum_np:
"""
get_lines_enum_np(hc: Cube_np) -> Lines_enum_np
    Returns enumerated lines of a hypercube
Parameters
----------
hc
The hypercube whose lines are to be calculated
Returns
-------
Enumerated numpy.ndarray views of the lines in `hc`.
See Also
--------
get_lines_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = get_lines_enum_np(hc)
>>> pprint(lines) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(lines.items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
"""
lines: Lines_enum_np = dict()
idx = 0
for line in get_lines_np(hc):
lines[idx] = line
idx += 1
return lines
def get_scopes_enum_np(lines: Lines_enum_np, d: int) -> Scopes_enum:
"""
get_scopes_enum_np(lines: Lines_enum_np, d: int) -> Scopes_enum:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_enum_np(hc) where hc is of the
form np.arange(n ** d, dtype = intxx).reshape([n] * d).
That is, hc is populated with the values 0,1,2,...,n^d - 1.
    d
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
    A dictionary with keys equal to the coordinates of each cell in the
hypercube. For each cell key, the value is the cell's
scope - a list of line enumerations that are lines containing
the cell.
See Also
--------
get_lines_enum_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = get_lines_enum_np(hc)
>>> pprint(lines) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(lines.items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
>>> scopes = get_scopes_enum_np(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
"""
n = lines[0].size
shape = [n] * d
scopes: Scopes_enum = DefaultDict(list)
for idx, line in lines.items():
for j in range(n):
cell = np.unravel_index(line[j], shape)
scopes[cell].append(idx)
return scopes
def structure_enum_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_enum_np:
"""
structure_enum_np(d: int, n: int, zeros: bool = True,
OFFSET: int = 0) ->
Structure_enum_np:
Return a hypercube, its enumerated lines and the scopes of
    its cells.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
zeros
If true, all values in array are 0, else they are 0,1,2,...
    OFFSET
        The number of cells is n^d. If this is greater than
(2^31 - OFFSET - 1) then we use np.int64 (instead of np.int32)
as the dtype of the numpy array.
Returns
-------
A tuple containing the hypercube, its enumerated lines, and the
scopes of its cells.
See Also
--------
get_lines_enum_np
get_scopes_enum_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> struct = structure_enum_np(2, 2)
>>> struct[0]
array([[0, 0],
[0, 0]])
>>> pprint(struct[1]) #doctest: +SKIP
{0: array([0, 0]), 1: array([0, 0]), 2: array([0, 0]),
3: array([0, 0]), 4: array([0, 0]), 5: array([0, 0])}
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 0])), (1, array([0, 0])), (2, array([0, 0])),
(3, array([0, 0])), (4, array([0, 0])), (5, array([0, 0]))]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
>>> struct = structure_enum_np(2, 2, False)
>>> struct[0]
array([[0, 1],
[2, 3]])
>>> pprint(struct[1]) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
"""
    # number of cells is n^d. If this is greater than (2^31 - OFFSET - 1)
    # then we use int64. This is because the get_scopes
# function populates the arrays with values 0,1,2, ...
dtype = np.int64 if n ** d > 2 ** 31 - OFFSET - 1 else np.int32
hc = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines = get_lines_enum_np(hc)
scopes = get_scopes_enum_np(lines, d)
if zeros:
hc.fill(0)
return (hc, lines, scopes)
def connected_cells_np(lines: Lines_enum_np, scopes: Scopes_enum, d: int) -> Connected_cells:
"""
connected_cells_np(lines: Lines_enum_np,
scopes: Scopes_enum, d: int) -> Connected_cells:
Calculate the connected cells for a cube.
Parameters
----------
lines
The enumerated lines of the hypercube
scopes
The enumerated scopes of the hypercube
Returns
------
    A dictionary with keys being cell coordinates and values the
connected cell coordinates.
See Also
--------
structure_enum_np
Examples
--------
>>> from pprint import pprint
>>> d = 2
>>> n = 3
>>> struct = structure_enum_np(d, n, False)
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
{0: array([0, 3, 6]),
1: array([1, 4, 7]),
2: array([2, 5, 8]),
3: array([0, 1, 2]),
4: array([3, 4, 5]),
5: array([6, 7, 8]),
6: array([0, 4, 8]),
7: array([6, 4, 2])}
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 3, 6],
(0, 1): [1, 3],
(0, 2): [2, 3, 7],
(1, 0): [0, 4],
(1, 1): [1, 4, 6, 7],
(1, 2): [2, 4],
(2, 0): [0, 5, 7],
(2, 1): [1, 5],
(2, 2): [2, 5, 6]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 3, 6]),
((0, 1), [1, 3]),
((0, 2), [2, 3, 7]),
((1, 0), [0, 4]),
((1, 1), [1, 4, 6, 7]),
((1, 2), [2, 4]),
((2, 0), [0, 5, 7]),
((2, 1), [1, 5]),
((2, 2), [2, 5, 6])]
>>> connected_cells = connected_cells_np(struct[1], struct[2], d)
>>> pprint(connected_cells) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(0, 1): [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)],
(0, 2): [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1),
(2, 2), (0, 2)],
(1, 0): [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)],
(1, 1): [(0, 1),
(1, 2),
(0, 0),
(0, 2),
(2, 1),
(2, 0),
(2, 2),
(1, 0),
(1, 1)],
(1, 2): [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)],
(2, 0): [(0, 0), (0, 2), (2, 1), (2, 0), (2, 2),
(1, 0), (1, 1)],
(2, 1): [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)],
(2, 2): [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1),
(2, 2), (0, 2)]})
>>> sorted(connected_cells.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((0, 1), [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)]),
((0, 2), [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)]),
((1, 0), [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)]),
((1, 1), [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0),
(2, 2), (1, 0), (1, 1)]),
((1, 2), [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)]),
((2, 0), [(0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)]),
((2, 1), [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)]),
((2, 2), [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)])]
"""
n = lines[0].size
shape = [n] * d
connected_cells: Connected_cells = DefaultDict(list)
for cell, lines_enums in scopes.items():
for line_enum in lines_enums:
for j in range(n):
cc = np.unravel_index(lines[line_enum][j], shape)
connected_cells[cell].append(cc)
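        # deduplicate - the same cell can be reached through more than one scoped line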
connected_cells[cell] = list(set(connected_cells[cell]))
return connected_cells
def get_scopes_coord(lines: Lines_coord, d: int) -> Scopes_coord:
"""
get_scopes_coord(lines: Lines_coord, d: int) -> Scopes_coord:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_coord(d, n).
    d
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to the coordinates of each cell in
the hypercube. For each cell key, the value is the cell's
scope - a list of coordinates that are lines containing
the cell.
See Also
--------
get_lines_coord
Examples
--------
>>> from pprint import pprint
>>> lines = list(get_lines_coord(2, 2))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[(0, 0), (1, 0)],
[(0, 1), (1, 1)],
[(0, 0), (0, 1)],
[(1, 0), (1, 1)],
[(0, 0), (1, 1)],
[(0, 1), (1, 0)]]
>>> scopes = get_scopes_coord(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]],
(0, 1): [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]],
(1, 0): [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]],
(1, 1): [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]]),
((0, 1), [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]]),
((1, 0), [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]]),
((1, 1), [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]])]
"""
n = len(lines[0])
scopes: Scopes_coord = DefaultDict(list)
cells = it.product(range(n), repeat = d) # get all possible cells
for cell in cells:
for line in lines:
if cell in line:
scopes[cell].append(line)
return scopes
def structure_coord(d: int, n: int) -> Structure_coord:
"""
structure_coord(d: int, n: int) -> Structure_coord:
Return lines, and the scopes of its cells, for h(d, n)
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
Lines, and the scopes of its cells, for h(d, n)
See Also
--------
get_lines_coord
get_scopes_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_coord(2, 2)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
[(1, 0), (1, 1)], [(0, 0), (1, 1)], [(0, 1), (1, 0)]]
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]],
(0, 1): [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]],
(1, 0): [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]],
(1, 1): [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]]),
((0, 1), [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]]),
((1, 0), [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]]),
((1, 1), [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]])]
"""
lines = list(get_lines_coord(d, n))
scopes = get_scopes_coord(lines, d)
return (lines, scopes)
def get_lines_enum_coord(d: int, n: int) -> Lines_enum_coord:
"""
get_lines_enum_coord(d: int, n: int) -> Lines_enum_coord:
Returns enumerated lines of a hypercube
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
    Returns
-------
Enumerated lines in h(d, n).
See Also
--------
get_lines_coord
Examples
--------
>>> lines = get_lines_enum_coord(2, 2)
>>> lines #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
"""
lines: Lines_enum_coord = dict()
idx = 0
for line in get_lines_coord(d, n):
lines[idx] = line
idx += 1
return lines
def get_scopes_enum_coord(lines: Lines_enum_coord, d: int) -> Scopes_enum:
"""
get_scopes_enum_coord(lines: Lines_enum_coord, d: int) ->
Scopes_enum:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_enum_coord(d, n).
    d
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
    A dictionary with keys equal to the coordinates of each cell in the
hypercube. For each cell key, the value is the cell's
scope - a list of line enumerations that are lines containing
the cell.
See Also
--------
get_lines_enum_coord
Examples
--------
>>> from pprint import pprint
>>> lines = get_lines_enum_coord(2, 2)
>>> lines #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
>>> scopes = get_scopes_enum_coord(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]),
((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]),
((1, 1), [1, 3, 4])]
"""
n = len(lines[0])
scopes: Scopes_enum = DefaultDict(list)
cells = it.product(range(n), repeat = d) # get all possible cells
for cell in cells:
for idx, line in lines.items():
if cell in line:
scopes[cell].append(idx)
return scopes
def structure_enum_coord(d: int, n: int) -> Structure_enum_coord:
"""
structure_enum_coord(d: int, n: int) ->
Structure_enum_coord:
Return enumerated lines, and the scopes of its cells, for h(d, n)
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
Enumerated lines, and the scopes of its cells, for h(d, n)
See Also
--------
get_lines_enum_coord
get_scopes_enum_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_enum_coord(2, 2)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]),
((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]),
((1, 1), [1, 3, 4])]
"""
lines = get_lines_enum_coord(d, n)
scopes = get_scopes_enum_coord(lines, d)
return (lines, scopes)
def connected_cells_coord(lines: Lines_enum_coord, scopes: Scopes_enum) -> Connected_cells:
"""
connected_cells_coord(lines: Lines_enum_coord, scopes: Scopes_enum)
-> Connected_cells:
Calculate the connected cells for a cube.
Parameters
----------
lines
The enumerated lines of the hypercube
scopes
The enumerated scopes of the hypercube
Returns
------
    A dictionary with keys being cell coordinates and values the
connected cell coordinates.
See Also
--------
structure_enum_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_enum_coord(2, 3)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0), (2, 0)],
1: [(0, 1), (1, 1), (2, 1)],
2: [(0, 2), (1, 2), (2, 2)],
3: [(0, 0), (0, 1), (0, 2)],
4: [(1, 0), (1, 1), (1, 2)],
5: [(2, 0), (2, 1), (2, 2)],
6: [(0, 0), (1, 1), (2, 2)],
7: [(0, 2), (1, 1), (2, 0)]}
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 3, 6],
(0, 1): [1, 3],
(0, 2): [2, 3, 7],
(1, 0): [0, 4],
(1, 1): [1, 4, 6, 7],
(1, 2): [2, 4],
(2, 0): [0, 5, 7],
(2, 1): [1, 5],
(2, 2): [2, 5, 6]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 3, 6]),
((0, 1), [1, 3]),
((0, 2), [2, 3, 7]),
((1, 0), [0, 4]),
((1, 1), [1, 4, 6, 7]),
((1, 2), [2, 4]),
((2, 0), [0, 5, 7]),
((2, 1), [1, 5]),
((2, 2), [2, 5, 6])]
>>> connected_cells = connected_cells_coord(*struct)
>>> pprint(connected_cells) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(0, 1): [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)],
(0, 2): [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1),
(2, 2), (0, 2)],
(1, 0): [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)],
(1, 1): [(0, 1),
(1, 2),
(0, 0),
(0, 2),
(2, 1),
(2, 0),
(2, 2),
(1, 0),
(1, 1)],
(1, 2): [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)],
(2, 0): [(0, 0), (2, 1), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(2, 1): [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)],
(2, 2): [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1),
(2, 2), (0, 2)]})
>>> sorted(connected_cells.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((0, 1), [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)]),
((0, 2), [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)]),
((1, 0), [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)]),
((1, 1), [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0),
(2, 2), (1, 0), (1, 1)]),
((1, 2), [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)]),
((2, 0), [(0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((2, 1), [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)]),
((2, 2), [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)])]
"""
connected_cells: Connected_cells = DefaultDict(list)
for cell, lines_enums in scopes.items():
for line_enum in lines_enums:
connected_cells[cell].extend(lines[line_enum])
connected_cells[cell] = list(set(connected_cells[cell]))
return connected_cells
def scopes_size(scopes: Scopes) -> Counter:
"""
scopes_size(scopes: Scopes) -> Counter:
Calculate the different scope lengths.
Parameters
----------
scopes
Dictionary of cells (keys) and their scopes
Returns
-------
Counter of scopes lengths (key) and their frequency (values).
See Also
--------
get_scopes_np
get_scopes_coord
Examples
--------
>>> import numpy as np
>>> scopes = structure_np(2, 3)[2]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_enum_np(2, 3)[2]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_coord(2, 3)[1]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_enum_coord(2, 3)[1]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
"""
return Counter([len(scope) for scope in scopes.values()])
def scopes_size_cell(scopes: Scopes) -> DefaultDict[int, List[Cell_coord]]:
"""
scopes_size_cell(scopes: Scopes) ->
DefaultDict[int, List[Cell_coord]]:
Group cells by length of their scope.
Parameters
----------
scopes
Dictionary of cells (keys) and their scopes
Returns
-------
    Dictionary of scopes lengths (key) and the list of cells with
scopes of that length.
See Also
--------
get_scopes_np
get_scopes_coord
get_scopes_enum
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> scopes = structure_np(2, 3)[2]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(1, 0), (0, 1), (2, 1), (1, 2)],
3: [(0, 0), (2, 0), (0, 2), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(1, 0), (0, 1), (2, 1), (1, 2)]),
(3, [(0, 0), (2, 0), (0, 2), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_enum_np(2, 3)[2]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(1, 0), (0, 1), (2, 1), (1, 2)],
3: [(0, 0), (2, 0), (0, 2), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(1, 0), (0, 1), (2, 1), (1, 2)]),
(3, [(0, 0), (2, 0), (0, 2), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_coord(2, 3)[1]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(0, 1), (1, 0), (1, 2), (2, 1)],
3: [(0, 0), (0, 2), (2, 0), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(0, 1), (1, 0), (1, 2), (2, 1)]),
(3, [(0, 0), (0, 2), (2, 0), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_enum_coord(2, 3)[1]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(0, 1), (1, 0), (1, 2), (2, 1)],
3: [(0, 0), (0, 2), (2, 0), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(0, 1), (1, 0), (1, 2), (2, 1)]),
(3, [(0, 0), (0, 2), (2, 0), (2, 2)]),
(4, [(1, 1)])]
"""
scopes_size_cell: DefaultDict[int, List[Cell_coord]] = DefaultDict(list)
for cell, scope in scopes.items():
scopes_size_cell[len(scope)].append(cell)
return scopes_size_cell
####################################################################################################
# The following 3 functions are for the displaying of a hypercube to a terminal.
# It is assumed that a numpy ndarray has been used to represent the hypercube
def display_np(hc: Cube_np, display_cell: Callable[[Any], Tuple[str, str, str]] = None, ul = False) -> str:
"""
display_np(hc: Cube_np, display_cell: Callable[[Any],
Tuple[str, str, str]] = None, ul = False) ->
str:
Construct a string to display the hypercube in the terminal.
Parameters
----------
hc
The hypercube to be displayed
display_cell
        A callback function called with the value of each cell.
It returns a tuple of strings - the character/string to be
displayed, and any formatting to be applied (typically ansi
color sequences). See Examples for how colors are specified.
If display_cell is not provided, the cell value is displayed.
ul
display_np calls itself recursively (see Notes). This parameter
is used to track whether a cell is on the bottom row of a
        2-d array. It has no direct impact when the user calls display_np
unless the array is 1-d, in which case it determines if cell
values are underlined when displayed.
Returns
-------
A string that can be printed to the terminal to display the
hypercube.
See Also
--------
underline
join_multiline
Notes
-----
The '|' character is used to represent the board horizontally.
Cell contents are underlined in order to represent the board
vertically. For example, the character 'X' is underlined to
    give 'X'. This function is recursive; it starts with the hypercube and
    keeps removing dimensions until it reaches a single cell, which can be
given a string value. We are trying to display d dimensions in
two dimensions. To do this, odd dimensions are
shown horizontally; even dimensions are shown vertically.
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> def dc(v: Any) -> Tuple[str, str, str]:
...
... # define colors - could also use colorama module
... # red foreground + yellow background
... pre_fmt = '\033[31;43m'
... post_fmt = '\033[0m' # removes color settings
...
... if v > 0:
... return 'X', pre_fmt, post_fmt
... elif v < 0:
... return 'O', pre_fmt, post_fmt
... else:
... return ' ', '', ''
>>> d = 3
>>> n = 3
>>> hc = np.zeros((n,) * d, dtype = int)
>>> hc[0, 0, 0] = 1
>>> hc[1, 1, 1] = -1
>>> disp = display_np(hc, dc)
>>> print(disp) #doctest: +SKIP
X|_|_ _|_|_ _|_|_
_|_|_ _|O|_ _|_|_
| | | | | |
"""
if hc.size == 1: # hc is a single cell
if display_cell is None:
s, pre_fmt, post_fmt = str(hc), '', ''
else:
s, pre_fmt, post_fmt = display_cell(hc)
        # underline displayed string (to represent board structure) unless
# string is in the bottom row of array
if ul:
s = '_' * len(s) if s.isspace() else underline(s)
return pre_fmt + s + post_fmt
# hc is not a single cell
d = hc.ndim
# break the array into sub arrays along the first dimension
sub_hc = [hc[i] for i in range(hc.shape[0])]
# constuct a string for each sub array
sub_hc_str = []
for c, a in enumerate(sub_hc):
if d == 2 and c == len(sub_hc) - 1:
# sub arr is 2-dimensional and last row - don't underline
ul = False
elif d != 1:
ul = True
sub_hc_str.append(display_np(a, display_cell, ul))
# join the sub strings
if d % 2 == 0: # even number of dimensions - display down the screen
if d == 2:
return ''.join('\n'.join(sub_hc_str))
else:
            sp = '\n' + '\n' * (int((d / 2) ** 1.5) - 1) # increase space between higher dimensions
return sp.join(sub_hc_str)
else: # odd number of dimensions - display across the screen
if d == 1:
return '|'.join(sub_hc_str)
else:
return join_multiline(sub_hc_str, ' ' + ' ' * int((d - 2) ** 1.5) + ' ', False)
def underline(s: str, alpha_only = True) -> str:
"""
underline(s: str, alpha_only = True) -> str
Underlines a string.
Parameters
----------
s
The string to be underlined
Returns
-------
An underlined string
Notes
-----
The code appears only to work properly with alphabetic characters.
Examples
--------
    >>> underline('X')
    'X̲'
    >>> underline('XX')
    'X̲X̲'
    >>> underline('1')
    '1'
    >>> underline('1', False)
    '1̲'
"""
try:
if alpha_only:
s_ = ""
for chr in str(s):
if chr.isalpha():
s_ = s_ + chr + "\u0332"
else:
s_ = s_ + chr
return s_
else:
return ''.join([chr + "\u0332" for chr in str(s)])
except:
return s
def join_multiline(iter: Iterable[str], divider: str = ' ', divide_empty_lines: bool = False,
fill_value: str = '_') -> str:
"""
join_multiline(iter: Iterable[str], divider: str = ' ',
divide_empty_lines: bool = False,
fill_value: str = '_') -> str
Join multiline string line by line.
Parameters
----------
iter
An iterable of multiline (or single line) strings
divider
String to divide the corresponding lines in each iterable
divide_empty_lines
If the corresponding line in each iterable is blank, then
determines if the lines are still divided by divider, or
divided by ''.
fill_value
If the number of lines in each multiline string in iter
differs, then fill_value is used to fill in values of the
shorter strings.
Returns
-------
The joined string.
Examples
--------
    >>> # note that newline has to be escaped to work in the doctest examples below.
>>> ml_1 = 'AA\\nMM\\nXX'
>>> ml_2 = 'BB\\nNN\\nYY'
>>> ml_3 = 'CC\\nOO\\nZZ'
>>> ml = join_multiline([ml_1, ml_2, ml_3])
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA BB CC
MM NN OO
XX YY ZZ
>>> ml = join_multiline([ml_1, ml_2, ml_3], divider = '_')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
MM_NN_OO
XX_YY_ZZ
>>> ml_3 = 'CC\\nOO'
>>> ml = join_multiline([ml_1, ml_2, ml_3], fill_value = '@')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA BB CC
MM NN OO
XX YY @
>>> ml_1 = 'AA\\n\\nMM'
>>> ml_2 = 'BB\\n\\nNN'
>>> ml_3 = 'CC\\n\\nZZ'
>>> ml = join_multiline([ml_1, ml_2, ml_3], divider = '_')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
<BLANKLINE>
MM_NN_ZZ
>>> ml = join_multiline([ml_1, ml_2, ml_3], '_', True)
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
__
MM_NN_ZZ
"""
# for each multiline block, split into individual lines
spl = [x.split('\n') for x in iter]
# create list of tuples with tuple i containing line i from each multiline block
tl = [i for i in it.zip_longest(*spl, fillvalue = fill_value)]
if divide_empty_lines:
st = [divider.join(t) for t in tl]
else:
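        # leave rows that are blank in every block empty instead of joining them with the divider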
st = []
for t in tl:
if all([not x.strip() for x in t]):
st.append('')
else:
st.append(divider.join(t))
# finally, join each string separated by a new line
return '\n'.join(st)
####################################################################################################
####################################################################################################
# The following functions are helper functions
def slice_ndarray(arr: Cube_np, dims: Collection[int], coords: Collection[int]) -> Cube_np:
"""
slice_ndarray(arr: Cube_np, dims: Collection[int],
coords: Collection[int]) ->
Cube_np:
Returns a slice of a hypercube.
Parameters
----------
arr
The hypercube to be sliced
dims
The dims to slice along
coords
The coordinates corresponding to the dimensions being sliced
Returns
-------
A view of a slice of `arr`.
Raises
------
ValueError
If length of `dims` is not equal to length of `coords`
Examples
--------
>>> import numpy as np
>>> arr = np.arange(8).reshape(2, 2, 2)
>>> arr
array([[[0, 1],
[2, 3]],
<BLANKLINE>
[[4, 5],
[6, 7]]])
>>> slice_ndarray(arr, (0,), (0,))
array([[0, 1],
[2, 3]])
>>> slice_ndarray(arr, (1, 2), (0, 0))
array([0, 4])
"""
# create a list of slice objects, one for each dimension of the array
    # Note: slice(None) is the same as ":". E.g. arr[:, 4] = arr[slice(None), 4]
sl: List[Union[slice, int]] = [slice(None)] * arr.ndim
if len(dims) != len(coords):
raise ValueError("dims and coords must be of the same length")
for dim, coord in zip(dims, coords):
sl[dim] = coord
return arr[tuple(sl)]
def insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]], val: Any) -> Tuple[int, ...]:
"""
insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]],
val: Any) ->
Tuple[int, ...]:
Insert values into a tuple.
Parameters
----------
tup
the tuple into which values are to be inserted
pos
The positions into which values are to be inserted
val
The values corresponding to the positions in `pos`
Returns
-------
A copy of `tup` with values inserted.
Raises
------
ValueError
If length of `pos` is not equal to length of `val`
Examples
--------
>>> tup = (0, 1, 2, 3)
>>> pos = (5, 1)
>>> val = (9, 8)
>>> insert_into_tuple(tup, pos, val)
(0, 8, 1, 2, 3, 9)
>>> insert_into_tuple(tup, (), ())
(0, 1, 2, 3)
"""
tl = list(tup)
if isinstance(pos, int):
tl.insert(pos, val)
else:
if len(pos) != len(val):
raise ValueError("pos and val must be of the same length")
if len(pos) == 0:
return tup
        # sort pos from low to high; sort val correspondingly
stl = list(zip(*sorted(zip(pos, val))))
for p, v in zip(stl[0], stl[1]):
tl.insert(p, v)
return tuple(tl)
def increment_cell_coord(cell: Cell_coord, pos: Sequence[int], incr: Sequence[int], add: bool = True) -> Cell_coord:
"""
increment_cell_coord(cell: Cell_coord, pos: Sequence[int],
incr: Sequence[int], add: bool = True) ->
Cell_coord:
Increments coordinates of a cell.
Parameters
----------
cell
the cell which will have coordinates incremented
pos
The coordinates which are to be incremented
incr
The increment values at the specified coordinates
add
        If True, the increments are added, else they are subtracted
Returns
-------
A copy of `cell` with incremented coordinates.
Raises
------
ValueError
        If length of `pos` is not equal to length of `incr`
Examples
--------
>>> cell = (1, 2, 1)
>>> pos = (0, 2)
>>> incr = (1, -1)
>>> increment_cell_coord(cell, pos, incr)
(2, 2, 0)
>>> increment_cell_coord(cell, pos, incr, False)
(0, 2, 2)
"""
if len(pos) != len(incr):
raise ValueError("pos and incr must be of the same length")
if len(pos) == 0:
return cell
cl = list(cell)
for i in range(len(pos)):
if add:
cl[pos[i]] += incr[i]
else:
cl[pos[i]] -= incr[i]
return tuple(cl)
def str_to_tuple(d: int, n: int, cell: str, offset: int = 1) -> Cell_coord:
"""
str_to_tuple(d: int, n: int, cell: str, offset: int = 1) ->
Cell_coord:
Returns cells coordinates provided as a string as a tuple
of integers.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
cell
Cell coordinates specified as a string (see Notes).
Will accept a non-string argument which will be cast
to a string.
offset
idx offset - typically 0 or 1.
Raises
------
ValueError
        1. If digits are not separated and n is greater than 9
        2. Incorrect number of coordinates provided
3. One or more coordinates is not valid
Notes
-----
If the string is all digits then assumes that each digit is a
coordinate. If non-digit characters are provided then assumes that
these split coordinates.
Returns
-------
A tuple containing the cell coordinates.
Examples
--------
>>> d = 3
>>> n = 3
>>> str_to_tuple(d, n, '123')
(0, 1, 2)
>>> str_to_tuple(d, n, '012', offset = 0)
(0, 1, 2)
>>> str_to_tuple(d, n, '1,2::3')
(0, 1, 2)
>>> str_to_tuple(d, n, 123)
(0, 1, 2)
>>> str_to_tuple(d, n, '12')
Traceback (most recent call last):
...
ValueError: Incorrect number of coordinates provided
>>> str_to_tuple(d, n, '125')
Traceback (most recent call last):
...
ValueError: One or more coordinates are not valid
>>> d = 3
>>> n = 10
>>> str_to_tuple(d, n, '123') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Board is too big for each dimension to be specified
by single digit
"""
cell = str(cell)
# check to see if there are any non-digits
nd = re.findall(r'\D+', cell)
if len(nd) == 0:
if n > 9:
raise ValueError("Board is too big for each dimension to be specified by single digit")
else:
tup = tuple(int(coord) - offset for coord in cell)
else: # there are non-digits, use these as separators
tup = tuple(int(coord) - offset for coord in re.findall(r'\d+', cell))
# check that correct number of coordinates specified
if len(tup) != d:
raise ValueError("Incorrect number of coordinates provided")
# check that each coordinate is valid
if all(t in range(n) for t in tup):
return tup
else:
raise ValueError("One or more coordinates are not valid")
def remove_invalid_cells_coord(n:int, line: Line_coord) -> Line_coord:
"""
remove_invalid_cells_coord(n:int, line: Line_coord) -> Line_coord
Remove cells that do not have valid coordinates.
Parameters
----------
n
The number of cells in any dimension
line
list of tuples representing cell coordinates (possibly invalid)
Returns
-------
    list of tuples representing valid cell coordinates
Examples
--------
>>> n = 3
>>> line = [(1, 2, 0), (-1, 0, 3), (0, 1, 2), (1, 2, 3)]
>>> remove_invalid_cells_coord(n, line)
[(1, 2, 0), (0, 1, 2)]
"""
rl = []
for cell in line:
if all(coord in range(n) for coord in cell):
rl.append(cell)
return rl
####################################################################################################
# used in internal testing
def _lines_np_coord_check(d: int, n: int) -> bool:
"""
_lines_np_coord_check(d: int, n: int) -> bool
Checks if lines_np and lines_coord give the same lines.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
True if lines_np and lines_coord give the same lines.
False otherwise.
See Also
--------
get_lines_np
get_lines_coord
Notes
-----
This function is a private function used in testing.
"""
dtype = np.int64 if n ** d > 2 ** 31 else np.int32
arr = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines_np = get_lines_np(arr)
lines_coord = get_lines_coord(d, n)
t_np = [tuple(sorted(l.tolist())) for l in lines_np] # type: ignore
t_coord = [tuple(sorted([arr[c] for c in l])) for l in lines_coord]
return set(t_np) == set(t_coord)
| 30.374202 | 116 | 0.510575 |
f81ca2ce592e84428e81a66ce38e515a6ee5edcf
| 42 |
py
|
Python
|
firecloud/__about__.py
|
jnktsj/fiss
|
2cfce1f6dc0c43f62c51e8a9296946b9990a76fa
|
[
"BSD-3-Clause"
] | 20 |
2017-08-05T08:44:51.000Z
|
2022-03-24T15:33:48.000Z
|
firecloud/__about__.py
|
jnktsj/fiss
|
2cfce1f6dc0c43f62c51e8a9296946b9990a76fa
|
[
"BSD-3-Clause"
] | 117 |
2016-10-26T15:31:48.000Z
|
2022-02-16T23:06:33.000Z
|
firecloud/__about__.py
|
jnktsj/fiss
|
2cfce1f6dc0c43f62c51e8a9296946b9990a76fa
|
[
"BSD-3-Clause"
] | 21 |
2017-03-13T15:16:03.000Z
|
2022-02-25T19:14:36.000Z
|
# Package version
__version__ = "0.16.31"
| 14 | 23 | 0.714286 |
f81e6f765fb2c951a1b3a358bc3ab07fe69f4752
| 11,140 |
py
|
Python
|
simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py
|
IMSY-DKFZ/simpa
|
b8bddcf43a4bff2564f0ec208dc511b82e49bfb4
|
[
"MIT"
] | 3 |
2022-03-14T15:40:09.000Z
|
2022-03-20T02:34:25.000Z
|
simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py
|
jgroehl/simpa
|
e56f0802e5a8555ee8bb139dd4f776025e7e9267
|
[
"MIT"
] | 3 |
2022-03-18T07:19:12.000Z
|
2022-03-30T12:15:19.000Z
|
simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py
|
IMSY-DKFZ/simpa
|
b8bddcf43a4bff2564f0ec208dc511b82e49bfb4
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
from simpa.core.device_digital_twins import SlitIlluminationGeometry, LinearArrayDetectionGeometry, PhotoacousticDevice
from simpa import perform_k_wave_acoustic_forward_simulation
from simpa.core.simulation_modules.reconstruction_module.reconstruction_module_delay_and_sum_adapter import \
reconstruct_delay_and_sum_pytorch
from simpa import MCXAdapter, ModelBasedVolumeCreationAdapter, \
GaussianNoise
from simpa.utils import Tags, Settings, TISSUE_LIBRARY
from simpa.core.simulation import simulate
from simpa.io_handling import load_data_field
import numpy as np
from simpa.utils.path_manager import PathManager
from simpa_tests.manual_tests import ManualIntegrationTestClass
import matplotlib.pyplot as plt
# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
if __name__ == '__main__':
test = KWaveAcousticForwardConvenienceFunction()
test.run_test(show_figure_on_screen=False)
| 48.859649 | 119 | 0.668223 |
f81ea939afded2dfd41116deec7708196341c5d1
| 10,881 |
py
|
Python
|
oc_ocdm/counter_handler/filesystem_counter_handler.py
|
arcangelo7/oc_ocdm
|
128d062ce9d858024aafd26d7d238c7a26cc8914
|
[
"0BSD"
] | 1 |
2020-12-17T15:33:01.000Z
|
2020-12-17T15:33:01.000Z
|
oc_ocdm/counter_handler/filesystem_counter_handler.py
|
arcangelo7/oc_ocdm
|
128d062ce9d858024aafd26d7d238c7a26cc8914
|
[
"0BSD"
] | 26 |
2021-01-08T08:32:23.000Z
|
2022-03-29T10:01:40.000Z
|
oc_ocdm/counter_handler/filesystem_counter_handler.py
|
arcangelo7/oc_ocdm
|
128d062ce9d858024aafd26d7d238c7a26cc8914
|
[
"0BSD"
] | 3 |
2021-04-16T08:44:44.000Z
|
2022-02-15T11:09:22.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from __future__ import annotations
import os
from shutil import copymode, move
from tempfile import mkstemp
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import BinaryIO, Tuple, List, Dict
from oc_ocdm.counter_handler.counter_handler import CounterHandler
| 42.503906 | 111 | 0.637074 |
f81fb7d0b255f47fb45c7a694f335756c5c2bb24
| 3,823 |
py
|
Python
|
backend_app/serializers.py
|
ilveroluca/backend
|
91b80b154c4e1e45587797cc41bf2b2b75c23e68
|
[
"MIT"
] | null | null | null |
backend_app/serializers.py
|
ilveroluca/backend
|
91b80b154c4e1e45587797cc41bf2b2b75c23e68
|
[
"MIT"
] | null | null | null |
backend_app/serializers.py
|
ilveroluca/backend
|
91b80b154c4e1e45587797cc41bf2b2b75c23e68
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from backend_app import models
# RESPONSES SERIALIZERS
| 27.905109 | 96 | 0.698666 |
f820475f96913877c23f5aa594fcc87cf676cc00
| 1,296 |
py
|
Python
|
src/api_status_monitor/consumer/database_connection.py
|
jjaakola/bang-a-gong
|
d30f889c18eeaff3d62d47cd02e93516e4d24dd7
|
[
"MIT"
] | null | null | null |
src/api_status_monitor/consumer/database_connection.py
|
jjaakola/bang-a-gong
|
d30f889c18eeaff3d62d47cd02e93516e4d24dd7
|
[
"MIT"
] | null | null | null |
src/api_status_monitor/consumer/database_connection.py
|
jjaakola/bang-a-gong
|
d30f889c18eeaff3d62d47cd02e93516e4d24dd7
|
[
"MIT"
] | null | null | null |
"""The database connection manager.
"""
import logging
import psycopg2
| 31.609756 | 86 | 0.500772 |
f8207cbc88a40509eaabe2f12c2e9fb96d02736a
| 1,154 |
py
|
Python
|
app/cvp.py
|
ekiminatorn/murmur-rest
|
594060264cd6ea594d5c07f40163782946f48eb2
|
[
"Unlicense",
"MIT"
] | 73 |
2015-01-08T19:58:36.000Z
|
2022-01-25T20:44:07.000Z
|
app/cvp.py
|
ekiminatorn/murmur-rest
|
594060264cd6ea594d5c07f40163782946f48eb2
|
[
"Unlicense",
"MIT"
] | 34 |
2015-01-08T19:52:34.000Z
|
2022-03-15T08:36:30.000Z
|
app/cvp.py
|
ekiminatorn/murmur-rest
|
594060264cd6ea594d5c07f40163782946f48eb2
|
[
"Unlicense",
"MIT"
] | 33 |
2015-01-08T19:22:40.000Z
|
2022-01-19T06:28:37.000Z
|
"""
cvp.py
Functions for generating CVP feeds.
:copyright: (C) 2014 by github.com/alfg.
:license: MIT, see README for more details.
"""
def cvp_player_to_dict(player):
"""
Convert a player object from a Tree to a CVP-compliant dict.
"""
return {
"session": player.session,
"userid": player.userid,
"name": player.name,
"deaf": player.deaf,
"mute": player.mute,
"selfDeaf": player.selfDeaf,
"selfMute": player.selfMute,
"suppress": player.suppress,
"onlinesecs": player.onlinesecs,
"idlesecs": player.idlesecs
}
def cvp_chan_to_dict(channel):
"""
Convert a channel from a Tree object to a CVP-compliant dict, recursively.
"""
return {
"id": channel.c.id,
"parent": channel.c.parent,
"name": channel.c.name,
"description": channel.c.description,
"channels": [cvp_chan_to_dict(c) for c in channel.children],
"users": [cvp_player_to_dict(p) for p in channel.users],
"position": channel.c.position,
"temporary": channel.c.temporary,
"links": channel.c.links
}
| 26.837209 | 78 | 0.604853 |
f82135374f4390dc528fb4356d78faff21f4ca0a
| 5,951 |
py
|
Python
|
Tools/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
|
VincentWei/mdolphin-core
|
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
|
[
"Apache-2.0"
] | 6 |
2017-05-31T01:46:45.000Z
|
2018-06-12T10:53:30.000Z
|
Tools/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
|
FMSoftCN/mdolphin-core
|
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
|
[
"Apache-2.0"
] | null | null | null |
Tools/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
|
FMSoftCN/mdolphin-core
|
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
|
[
"Apache-2.0"
] | 2 |
2017-07-17T06:02:42.000Z
|
2018-09-19T10:08:38.000Z
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base
import unittest
import tempfile
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.thirdparty.mock import Mock
| 46.858268 | 175 | 0.682406 |
f8237a8940cd62de0269063bae0eb6296bc0aa2a
| 2,796 |
py
|
Python
|
data/classifier/general_test.py
|
alexv1/tensorflow_learn
|
ae936ffdc211a11403d6a06401a2115334b46402
|
[
"Apache-2.0"
] | null | null | null |
data/classifier/general_test.py
|
alexv1/tensorflow_learn
|
ae936ffdc211a11403d6a06401a2115334b46402
|
[
"Apache-2.0"
] | null | null | null |
data/classifier/general_test.py
|
alexv1/tensorflow_learn
|
ae936ffdc211a11403d6a06401a2115334b46402
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from skimage import transform
import tensorflow as tf
import numpy as np
import glob
import face_recognition as FR
import os
import shutil
| 27.96 | 87 | 0.672031 |
f8238013e026edf0a1b82a52242ee8f202d32c83
| 693 |
py
|
Python
|
func.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | null | null | null |
func.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | null | null | null |
func.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | 1 |
2021-02-06T04:28:17.000Z
|
2021-02-06T04:28:17.000Z
|
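# --- Hedged sketch: the helper functions called by this script (cyl, con,
# final_price) are missing from this excerpt. The bodies below use the standard
# curved-surface-area formulas; the 18% tax rate is an assumption.
def cyl(h, r):
    # Curved surface area of a cylinder: 2 * pi * r * h
    return 2 * 3.14159 * r * h

def con(r, l):
    # Curved surface area of a cone: pi * r * l
    return 3.14159 * r * l

def final_price(amount, tax_rate=0.18):
    # Price after adding tax at the given rate
    return amount * (1 + tax_rate)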
print("Enter Values of cylindrical part of tent ")
h = float(input("Height : "))
r = float(input("radius : "))
csa_cyl = cyl(h, r)
l = float(input("Enter slant height "))
csa_con = con(r, l)
canvas_area = csa_cyl + csa_con
print("Area of canvas = ", canvas_area, " m^2")
unit_price = float(input("Enter cost of 1 m^2 "))
total_price = unit_price * canvas_area
print("Total cost of canvas before tax ",total_price)
print("Inluding tax"+ str(final_price(total_price)))
| 28.875 | 53 | 0.658009 |
f823c6094a403ab6a62faccb2e76b2e2b2d997a0
| 1,282 |
py
|
Python
|
pymoku/plotly_support.py
|
manekawije/Liquid
|
284991ceca70ec3fcd0cca7e19f4100463600a6c
|
[
"MIT"
] | null | null | null |
pymoku/plotly_support.py
|
manekawije/Liquid
|
284991ceca70ec3fcd0cca7e19f4100463600a6c
|
[
"MIT"
] | null | null | null |
pymoku/plotly_support.py
|
manekawije/Liquid
|
284991ceca70ec3fcd0cca7e19f4100463600a6c
|
[
"MIT"
] | null | null | null |
# Plotly integration for the Moku:Lab Datalogger
# Copyright 2016 Liquid Instruments Pty. Ltd.
from pymoku import InvalidOperationException
| 26.163265 | 92 | 0.705148 |
f8277c470e26c658915e5f878e41e448502ec2a5
| 1,126 |
py
|
Python
|
test_publisher.py
|
cpgillem/markdown_publisher
|
a8e6bacea95196b9a18ad8fa2f85822c5d9c4e74
|
[
"MIT"
] | null | null | null |
test_publisher.py
|
cpgillem/markdown_publisher
|
a8e6bacea95196b9a18ad8fa2f85822c5d9c4e74
|
[
"MIT"
] | 3 |
2015-04-11T08:16:56.000Z
|
2015-04-11T08:17:32.000Z
|
test_publisher.py
|
cpgillem/markdown-publisher
|
a8e6bacea95196b9a18ad8fa2f85822c5d9c4e74
|
[
"MIT"
] | null | null | null |
import publisher
test_pdf_filename = "test/test.pdf"
test_css_filename = "test/test.css"
test_md_filename = "test/test.md"
test_html_filename = "test/test.html"
test_sender = "[email protected]"
test_recipient = "[email protected]"
test_md = "# Test heading\n\n- test item 1\n- test item 2"
# The test case currently in use
from_md_to_html_email()
| 32.171429 | 87 | 0.781528 |
f82900deb38425b32b0150ae828a4448ba15499c
| 24 |
py
|
Python
|
src/train/__init__.py
|
gracengu/multinomial_classification
|
2346533415aff151d1774d36405360ca236cee3f
|
[
"MIT"
] | 2 |
2021-11-16T12:52:58.000Z
|
2021-12-13T04:00:39.000Z
|
src/train/__init__.py
|
gracengu/multinomial_classification
|
2346533415aff151d1774d36405360ca236cee3f
|
[
"MIT"
] | null | null | null |
src/train/__init__.py
|
gracengu/multinomial_classification
|
2346533415aff151d1774d36405360ca236cee3f
|
[
"MIT"
] | null | null | null |
from .train import Train
| 24 | 24 | 0.833333 |
f82c17e0d48a8946b94491663089d67afc63ece3
| 1,185 |
py
|
Python
|
tracpro/msgs/migrations/0005_inboxmessage.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 5 |
2015-07-21T15:58:31.000Z
|
2019-09-14T22:34:00.000Z
|
tracpro/msgs/migrations/0005_inboxmessage.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 197 |
2015-03-24T15:26:04.000Z
|
2017-11-28T19:24:37.000Z
|
tracpro/msgs/migrations/0005_inboxmessage.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 10 |
2015-03-24T12:26:36.000Z
|
2017-02-21T13:08:57.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 38.225806 | 118 | 0.599156 |
f82d5b036daead0dff75c2761e785f8a14568edb
| 191 |
py
|
Python
|
src/Models/__init__.py
|
shulip/ShoppingMallSystem
|
01e5a04a8353ca319ed2dc002fc358f6e44c33dd
|
[
"MIT"
] | null | null | null |
src/Models/__init__.py
|
shulip/ShoppingMallSystem
|
01e5a04a8353ca319ed2dc002fc358f6e44c33dd
|
[
"MIT"
] | null | null | null |
src/Models/__init__.py
|
shulip/ShoppingMallSystem
|
01e5a04a8353ca319ed2dc002fc358f6e44c33dd
|
[
"MIT"
] | 1 |
2021-04-22T15:14:21.000Z
|
2021-04-22T15:14:21.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from .Contract import *
from .Receivable import *
from .Receipt import *
from .Shop import *
from .Statement import *
from .Application import *
| 21.222222 | 26 | 0.701571 |
f82d7cf376b5b98be3742039b95afbfff6e6b1f8
| 1,630 |
py
|
Python
|
description tm.py
|
jfoerderer/lda-topic-modeling
|
998701f87df3a3d034d9208ff60266dcd6dc2b59
|
[
"MIT"
] | 2 |
2017-09-02T09:00:24.000Z
|
2017-09-08T07:18:38.000Z
|
description tm.py
|
jfoerderer/lda-topic-modeling
|
998701f87df3a3d034d9208ff60266dcd6dc2b59
|
[
"MIT"
] | null | null | null |
description tm.py
|
jfoerderer/lda-topic-modeling
|
998701f87df3a3d034d9208ff60266dcd6dc2b59
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import csv
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora
import gensim
import os
import re
from nltk.tokenize import RegexpTokenizer
#SET PATH
path = r''
inputname=""
def remove_html_tags(text):
"""Remove html tags from a string"""
clean = re.compile('<.*?>')
return re.sub(clean, '', text)
#setup
tokenizer = RegexpTokenizer(r'\w+')
en_stop = get_stop_words('en')
p_stemmer = PorterStemmer()
fn = os.path.join(path, inputname)
doc_set = []
with open(fn, encoding="utf8" ) as f:
csv_f = csv.reader(f)
for i, row in enumerate(csv_f):
if i > 1 and len(row) > 1 :
temp=remove_html_tags(row[1])
temp = re.sub("[^a-zA-Z ]","", temp)
doc_set.append(temp)
texts = []
for i in doc_set:
if i.strip():
raw = i.lower()
tokens = tokenizer.tokenize(raw)
if len(tokens)>5:
stopped_tokens = [i for i in tokens if not i in en_stop]
texts.append(stopped_tokens)
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary, num_topics=5 )
print (lsi.print_topics(num_topics=3, num_words=3))
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=20, id2word = dictionary, passes=20)
print(ldamodel.print_topics(num_topics=20, num_words=5))
K = ldamodel.num_topics
topicWordProbMat = ldamodel.print_topics(K)
| 26.721311 | 99 | 0.628834 |
f82ef0c0ee2c3fc021e7566fc3d68636a538299f
| 596 |
py
|
Python
|
scripts/load_sample_data.py
|
thobbs/logsandra
|
a17abc995dcb0573f3db2f714c1b47d3aff8b20a
|
[
"MIT"
] | 7 |
2015-05-18T13:00:54.000Z
|
2018-08-06T08:27:57.000Z
|
scripts/load_sample_data.py
|
thobbs/logsandra
|
a17abc995dcb0573f3db2f714c1b47d3aff8b20a
|
[
"MIT"
] | null | null | null |
scripts/load_sample_data.py
|
thobbs/logsandra
|
a17abc995dcb0573f3db2f714c1b47d3aff8b20a
|
[
"MIT"
] | 4 |
2015-06-16T11:09:53.000Z
|
2020-04-27T19:25:57.000Z
|
#!/usr/bin/env python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
from random import randint
from datetime import datetime, timedelta
from logsandra.model.client import CassandraClient
client = CassandraClient('test', 'localhost', 9160, 3)
keywords = ['foo', 'bar', 'baz']
print "Loading sample data for the following keywords:", ', '.join(keywords)
today = datetime.now()
for i in range(1000):
d = today + timedelta(randint(-7, -1), randint(-3600*24, 3600*24))
client.add_log(d, 'test entry', 'here', [keywords[i % len(keywords)]])
| 28.380952 | 76 | 0.697987 |
f830e618925548200af372e7691ce927a36784c1
| 867 |
py
|
Python
|
registry/setup.py
|
fjrmoreews/bioshadock_client
|
26a1de6e130689b6385144253525c861d2a2199d
|
[
"Apache-2.0"
] | 1 |
2015-11-25T19:03:58.000Z
|
2015-11-25T19:03:58.000Z
|
registry/setup.py
|
fjrmoreews/bioshadock_client
|
26a1de6e130689b6385144253525c861d2a2199d
|
[
"Apache-2.0"
] | 2 |
2015-11-24T14:45:44.000Z
|
2015-11-26T15:28:30.000Z
|
registry/setup.py
|
fjrmoreews/bioshadock_client
|
26a1de6e130689b6385144253525c861d2a2199d
|
[
"Apache-2.0"
] | 1 |
2015-11-27T10:57:15.000Z
|
2015-11-27T10:57:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
# name of the lib
name='bioshadock_biotools',
# version
version='1.0.1',
packages=find_packages(),
author="Francois Moreews",
description="Import tool for biotools from Dockerfile",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"License :: Apache 2.0",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Topic :: Communications",
],
scripts = [
'parseDockerFile.py',
'registryClient.py'
],
install_requires = [
'lxml',
'requests>=2.7.0'
],
license="Apache 2.0",
)
| 18.847826 | 59 | 0.575548 |
f831926e75acbe42ce6d5e5261d3946d9b9dfea1
| 1,176 |
py
|
Python
|
_example/xor_embedded/make.py
|
backwardn/go-tflite
|
30f5e2a268d2eb053f758636609c5c379a3016b5
|
[
"MIT"
] | 3 |
2020-01-09T02:57:30.000Z
|
2020-07-17T15:56:50.000Z
|
_example/xor_embedded/make.py
|
backwardn/go-tflite
|
30f5e2a268d2eb053f758636609c5c379a3016b5
|
[
"MIT"
] | null | null | null |
_example/xor_embedded/make.py
|
backwardn/go-tflite
|
30f5e2a268d2eb053f758636609c5c379a3016b5
|
[
"MIT"
] | null | null | null |
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop
from tensorflow.lite.python import lite
X_train = np.array([[0.0, 0.0],
[1.0, 0.0],
[0.0, 1.0],
[1.0, 1.0]])
Y_train = np.array([0.0,
1.0,
1.0,
0.0])
model = Sequential()
output_count_layer0 = 2
model.add(
Dense(
output_count_layer0,
input_shape=(2, ),
activation='sigmoid')) # Need to specify input shape for input layer
output_count_layer1 = 1
model.add(Dense(output_count_layer1, activation='linear'))
model.compile(
loss='mean_squared_error', optimizer=RMSprop(), metrics=['accuracy'])
BATCH_SIZE = 4
history = model.fit(
X_train, Y_train, batch_size=BATCH_SIZE, epochs=3600, verbose=1)
X_test = X_train
Y_test = Y_train
score = model.evaluate(X_test, Y_test, verbose=0)
model.save('xor_model.h5')
converter = lite.TFLiteConverter.from_keras_model_file('xor_model.h5')
tflite_model = converter.convert()
open('public/xor_model.tflite', 'wb').write(tflite_model)
| 30.947368 | 75 | 0.662415 |
f835c7244c8f288b00b860e6cef6f64c28c3ea69
| 473 |
py
|
Python
|
app/sso/user/models.py
|
ChristianKreuzberger/django-oauth-sso
|
b019e2e8232ae141b50b8270e79e0617e24f54bb
|
[
"MIT"
] | null | null | null |
app/sso/user/models.py
|
ChristianKreuzberger/django-oauth-sso
|
b019e2e8232ae141b50b8270e79e0617e24f54bb
|
[
"MIT"
] | null | null | null |
app/sso/user/models.py
|
ChristianKreuzberger/django-oauth-sso
|
b019e2e8232ae141b50b8270e79e0617e24f54bb
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
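# --- Hedged sketch (not from the original app): the imports above point at a
# custom user model; the class below is illustrative and its extra field is an assumption.
class User(AbstractUser):
    """Project user model extending Django's AbstractUser."""

    display_name = models.CharField(_("display name"), max_length=255, blank=True)

    def __str__(self):
        return self.display_name or self.get_username()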
| 21.5 | 73 | 0.649049 |
f837af8b513ac4ce60f3ce335c72f8849a0bd813
| 1,710 |
py
|
Python
|
src/fusanet_utils/features/base.py
|
fusa-project/fusa-net-utils
|
b8740c67c0c789889b7abce477c894d77c70a20c
|
[
"MIT"
] | null | null | null |
src/fusanet_utils/features/base.py
|
fusa-project/fusa-net-utils
|
b8740c67c0c789889b7abce477c894d77c70a20c
|
[
"MIT"
] | null | null | null |
src/fusanet_utils/features/base.py
|
fusa-project/fusa-net-utils
|
b8740c67c0c789889b7abce477c894d77c70a20c
|
[
"MIT"
] | null | null | null |
import logging
from abc import ABC, abstractmethod
from os.path import isfile, splitext
import pathlib
import torch
from .waveform import get_waveform
logger = logging.getLogger(__name__)
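# --- Hedged sketch: the abstract feature class itself is missing from this
# excerpt; the name `Feature` and the `compute` method are assumptions inferred
# from the abc/torch imports, not the project's actual interface.
class Feature(ABC):
    """Base class for extractors that turn waveform tensors into feature tensors."""

    @abstractmethod
    def compute(self, waveform: torch.Tensor) -> torch.Tensor:
        """Compute the feature representation of a single waveform."""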
| 35.625 | 83 | 0.657895 |
f837f76576c4f735618a20e51681085aeb556de5
| 251 |
py
|
Python
|
led/hhh/rta2.py
|
tushar-tdm/osvi
|
813499162b7f487ccafa8c08d3e5bf6d05b074de
|
[
"CC-BY-3.0"
] | 3 |
2020-02-21T01:16:26.000Z
|
2020-07-12T08:06:11.000Z
|
led/hhh/rta2.py
|
tushar-tdm/osvi
|
813499162b7f487ccafa8c08d3e5bf6d05b074de
|
[
"CC-BY-3.0"
] | 6 |
2020-02-11T23:27:43.000Z
|
2022-03-11T23:34:39.000Z
|
led/hhh/rta2.py
|
tushar-tdm/osvi
|
813499162b7f487ccafa8c08d3e5bf6d05b074de
|
[
"CC-BY-3.0"
] | null | null | null |
import os
import sys
import serial
import time
import struct
ser = serial.Serial('/dev/ttyACM0',9600)
led = sys.argv[1]
act = sys.argv[2]
l = str(led)
"""a = str(act)"""
time.sleep(5)
ser.write(struct.pack('B', int(l)))  # pack the LED number as a single byte before sending
""" ser.write(l.encode()) """
| 14.764706 | 40 | 0.661355 |
f838fea76677e89d488005a23aab7f853eac184d
| 11,397 |
py
|
Python
|
app.py
|
KendraObika/Froggit
|
3734d74de6b7febabb6c1645b61e42928203cf63
|
[
"MIT"
] | null | null | null |
app.py
|
KendraObika/Froggit
|
3734d74de6b7febabb6c1645b61e42928203cf63
|
[
"MIT"
] | null | null | null |
app.py
|
KendraObika/Froggit
|
3734d74de6b7febabb6c1645b61e42928203cf63
|
[
"MIT"
] | null | null | null |
"""
Primary module for Froggit
This module contains the main controller class for the Froggit application. There
is no need for any additional classes in this module. If you need more classes, 99%
of the time they belong in either the lanes module or the models module. If you are
unsure about where a new class should go, post a question on Piazza.
Kendra Obika kao78
December 20 2020
"""
from consts import *
from game2d import *
from level import *
import introcs
from kivy.logger import Logger
# PRIMARY RULE: Froggit can only access attributes in level.py via getters/setters
# Froggit is NOT allowed to access anything in lanes.py or models.py.
| 41.443636 | 87 | 0.668246 |
f83913edc4b000ba4986205d63145c52269b4655
| 1,252 |
py
|
Python
|
utils.py
|
rsoorajs/deecubes-telegram-bot
|
223710eb117c1333fefcff22bcf473e89e41c769
|
[
"MIT"
] | 2 |
2017-10-08T19:02:01.000Z
|
2020-05-16T21:55:18.000Z
|
utils.py
|
rsoorajs/deecubes-telegram-bot
|
223710eb117c1333fefcff22bcf473e89e41c769
|
[
"MIT"
] | null | null | null |
utils.py
|
rsoorajs/deecubes-telegram-bot
|
223710eb117c1333fefcff22bcf473e89e41c769
|
[
"MIT"
] | 3 |
2018-08-05T18:36:58.000Z
|
2020-05-16T21:55:19.000Z
|
import logging
from functools import wraps
from PIL import Image, ImageFont, ImageDraw
from config import LIST_ALLOWED_USERS
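# --- Hedged sketch (not from this repo): the common python-telegram-bot
# access-control decorator that the wraps/LIST_ALLOWED_USERS imports suggest;
# the handler signature (update, context) is an assumption.
logger = logging.getLogger(__name__)


def restricted(func):
    """Only run the handler for user ids listed in LIST_ALLOWED_USERS."""

    @wraps(func)
    def wrapped(update, context, *args, **kwargs):
        user_id = update.effective_user.id
        if user_id not in LIST_ALLOWED_USERS:
            logger.warning("Unauthorized access denied for %s.", user_id)
            return None
        return func(update, context, *args, **kwargs)

    return wrapped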
| 25.04 | 90 | 0.683706 |
f83abdd41d8480514557524b539c95519e6c83ef
| 152 |
py
|
Python
|
__init__.py
|
cmt-qo/cm-flakes
|
c11f37b50b088cf5c876ef8a6161b7d8d775e99b
|
[
"MIT"
] | 6 |
2019-11-04T07:04:24.000Z
|
2021-02-10T21:35:00.000Z
|
__init__.py
|
cmt-qo/cm-flakes
|
c11f37b50b088cf5c876ef8a6161b7d8d775e99b
|
[
"MIT"
] | null | null | null |
__init__.py
|
cmt-qo/cm-flakes
|
c11f37b50b088cf5c876ef8a6161b7d8d775e99b
|
[
"MIT"
] | 2 |
2020-08-07T09:29:41.000Z
|
2021-02-10T21:35:05.000Z
|
from .Camera import *
from .GloveBox import *
from .Microscope import *
from .Stage import *
from .UserInterface import *
from .NeuralNetwork import *
| 25.333333 | 28 | 0.756579 |
f83ba25f5a20e6c46fa842756d48009b7d4b11f6
| 4,444 |
py
|
Python
|
neural_semigroups/mace4_semigroups_dataset.py
|
zarebulic/neural-semigroup-experiment
|
c554acb17d264ba810009f8b86c35ee9f8c4d1f4
|
[
"Apache-2.0"
] | 6 |
2020-04-05T23:24:54.000Z
|
2021-11-15T11:17:09.000Z
|
neural_semigroups/mace4_semigroups_dataset.py
|
zarebulic/neural-semigroup-experiment
|
c554acb17d264ba810009f8b86c35ee9f8c4d1f4
|
[
"Apache-2.0"
] | 23 |
2020-03-15T09:09:54.000Z
|
2022-03-29T22:32:23.000Z
|
neural_semigroups/mace4_semigroups_dataset.py
|
zarebulic/neural-semigroup-experiment
|
c554acb17d264ba810009f8b86c35ee9f8c4d1f4
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019-2021 Boris Shminke
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sqlite3
from typing import Callable, Optional
import torch
from tqdm import tqdm
from neural_semigroups.semigroups_dataset import SemigroupsDataset
from neural_semigroups.utils import connect_to_db
| 34.71875 | 78 | 0.617912 |
f83bb94361c259b35e4ff208fa028f2496100f01
| 7,501 |
py
|
Python
|
samples/data_inspect_utils.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 245 |
2019-11-29T02:55:25.000Z
|
2022-03-30T07:30:18.000Z
|
samples/data_inspect_utils.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 24 |
2019-11-29T10:05:00.000Z
|
2022-03-30T07:16:06.000Z
|
samples/data_inspect_utils.py
|
FishLiuabc/centerpose
|
555d753cd82693476f91f78c53aa4147f5a83015
|
[
"MIT"
] | 45 |
2019-11-29T05:12:02.000Z
|
2022-03-21T02:20:36.000Z
|
from __future__ import absolute_import, division, print_function
import cv2
import random
import numpy as np
import colorsys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import Polygon
from skimage.measure import find_contours
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def display_instances(image, boxes, masks, keypoints, class_id=1, class_name='person',
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
show_keypoint=True,
colors=None, captions=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: 1 for person
class_name: class name of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array or colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
class_id = class_id
score = scores[i] if scores is not None else None
label = class_name
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[i, :, :]
keypoint = keypoints[i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
if show_keypoint:
masked_image = apply_keypoint(masked_image, keypoint)
ax.imshow(masked_image.astype(np.uint8))
if auto_show:
plt.show()
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [num_instances, height, width]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[0], 4], dtype=np.int32)
for i in range(mask.shape[0]):
m = mask[i, :, :]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
| 35.382075 | 94 | 0.55046 |
f83bc822a6f47feb415380dd8f541756419c1e6c
| 265 |
py
|
Python
|
tests/conftest.py
|
sparkythehuman/sms-service--send-message
|
8f095ba181f1d42df3968fe34d5e20f30851e021
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
sparkythehuman/sms-service--send-message
|
8f095ba181f1d42df3968fe34d5e20f30851e021
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
sparkythehuman/sms-service--send-message
|
8f095ba181f1d42df3968fe34d5e20f30851e021
|
[
"MIT"
] | null | null | null |
import pytest
| 33.125 | 82 | 0.784906 |
f83c3a927ff9df79fe83f0ce7fdfd551b1c6f921
| 7,741 |
py
|
Python
|
dapy/filters/particle.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 11 |
2020-07-29T07:46:39.000Z
|
2022-03-17T01:28:07.000Z
|
dapy/filters/particle.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 1 |
2020-07-14T11:49:17.000Z
|
2020-07-29T07:43:22.000Z
|
dapy/filters/particle.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 10 |
2020-07-14T11:34:24.000Z
|
2022-03-07T09:08:12.000Z
|
"""Particle filters for inference in state space models."""
import abc
from typing import Tuple, Dict, Callable, Any, Optional
import numpy as np
from numpy.random import Generator
from scipy.special import logsumexp
from scipy.sparse import csr_matrix
from dapy.filters.base import AbstractEnsembleFilter
from dapy.models.base import AbstractModel
import dapy.ot as optimal_transport
| 44.745665 | 88 | 0.689058 |
f83d223baea30c7408f539bf887906161d4b99ea
| 1,477 |
py
|
Python
|
pokemon.py
|
bran-almeida/Pokemon_Game
|
061c9e1b53d8cbaa7366634535288bb2868d6885
|
[
"MIT"
] | null | null | null |
pokemon.py
|
bran-almeida/Pokemon_Game
|
061c9e1b53d8cbaa7366634535288bb2868d6885
|
[
"MIT"
] | null | null | null |
pokemon.py
|
bran-almeida/Pokemon_Game
|
061c9e1b53d8cbaa7366634535288bb2868d6885
|
[
"MIT"
] | null | null | null |
import random
| 27.867925 | 83 | 0.564658 |
f83da86dbe71993fb962e0b2187a7e3ca515bae8
| 2,254 |
py
|
Python
|
recipes/Python/577563_Vectorize_Operation/recipe-577563.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023 |
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/577563_Vectorize_Operation/recipe-577563.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32 |
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/577563_Vectorize_Operation/recipe-577563.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780 |
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
"""
Copyright 2011 Shao-Chuan Wang <shaochuan.wang AT gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import operator
from itertools import imap, repeat
import functools
iterable = lambda obj: isinstance(obj, basestring) or hasattr(obj, '__iter__')
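# --- Hedged sketch: vector_op, vector_sum and vector_mean are used below but
# missing from this excerpt; the Python 2 definitions here are a plausible
# reconstruction based on the imap/repeat imports, not the author's exact code.
def vector_op(op, v1, v2):
    # Apply `op` element-wise; a scalar operand is broadcast with `repeat`.
    if not iterable(v1):
        v1 = repeat(v1)
    if not iterable(v2):
        v2 = repeat(v2)
    return tuple(imap(op, v1, v2))

def vector_sum(vectors):
    # Element-wise sum of a sequence of equally sized vectors.
    return reduce(vector_add, vectors)

def vector_mean(vectors):
    # Element-wise arithmetic mean of a sequence of vectors.
    return vector_div(vector_sum(vectors), float(len(vectors)))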
vector_add = functools.partial(vector_op, operator.add)
vector_sub = functools.partial(vector_op, operator.sub)
vector_mul = functools.partial(vector_op, operator.mul)
vector_div = functools.partial(vector_op, operator.div)
vector_and = functools.partial(vector_op, operator.and_)
vector_or = functools.partial(vector_op, operator.or_)
if __name__ == '__main__':
positions = [(1,2,1), (3,4,3), (5,6,3)]
print vector_sum(positions)
print vector_mean(positions)
| 40.25 | 81 | 0.732476 |
f83f6977354074227de8507f3a2a55a87f9d6abe
| 5,752 |
py
|
Python
|
sdks/python/appcenter_sdk/models/BranchConfigurationToolsets.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | null | null | null |
sdks/python/appcenter_sdk/models/BranchConfigurationToolsets.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 6 |
2019-10-23T06:38:53.000Z
|
2022-01-22T07:57:58.000Z
|
sdks/python/appcenter_sdk/models/BranchConfigurationToolsets.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 2 |
2019-10-23T06:31:05.000Z
|
2021-08-21T17:32:47.000Z
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BranchConfigurationToolsets):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.497436 | 113 | 0.595271 |
f840464edc80ddc50844d1de4a6669b63272a7ea
| 1,156 |
py
|
Python
|
tests/cli/version_test.py
|
longhuei/floyd-cli
|
82709f1e301d7a56ac354e4615a354e2c36d71b8
|
[
"Apache-2.0"
] | 162 |
2017-01-27T02:54:17.000Z
|
2022-03-03T09:06:28.000Z
|
tests/cli/version_test.py
|
longhuei/floyd-cli
|
82709f1e301d7a56ac354e4615a354e2c36d71b8
|
[
"Apache-2.0"
] | 79 |
2017-02-17T08:58:39.000Z
|
2021-05-29T09:24:31.000Z
|
tests/cli/version_test.py
|
longhuei/floyd-cli
|
82709f1e301d7a56ac354e4615a354e2c36d71b8
|
[
"Apache-2.0"
] | 43 |
2017-02-23T10:58:42.000Z
|
2022-01-17T10:29:31.000Z
|
from click.testing import CliRunner
import unittest
from mock import patch, Mock, PropertyMock
from floyd.cli.version import upgrade
| 32.111111 | 98 | 0.702422 |
f8423088619bdfe61a95a3f318f27fab6ca0c75a
| 4,181 |
py
|
Python
|
offthedialbot/help.py
|
DJam98/bot
|
366a46bcca55098e1030a4f05d63e8872a791bf8
|
[
"MIT"
] | 2 |
2020-08-31T15:45:07.000Z
|
2021-09-26T22:15:43.000Z
|
offthedialbot/help.py
|
DJam98/bot
|
366a46bcca55098e1030a4f05d63e8872a791bf8
|
[
"MIT"
] | 17 |
2020-06-02T02:29:48.000Z
|
2021-10-13T23:47:44.000Z
|
offthedialbot/help.py
|
DJam98/bot
|
366a46bcca55098e1030a4f05d63e8872a791bf8
|
[
"MIT"
] | 3 |
2020-05-31T23:17:10.000Z
|
2022-03-09T22:23:22.000Z
|
"""Contains HelpCommand class."""
import discord
from discord.ext import commands
from offthedialbot import utils
def short(self, command, doc=True):
"""List the command as a one-liner."""
sig = self.get_command_signature(command) if not doc else f'{self.clean_prefix}{command}'
return f'`{sig[:-1] if sig.endswith(" ") else sig}` {(command.short_doc if doc else "")}'
help_command = HelpCommand()
| 38.712963 | 165 | 0.577613 |
f8430cf263194ac34b0078e29e9eec8808714370
| 255 |
py
|
Python
|
ex10.6.py
|
Dikaeinstein/Think_Python
|
370cb5af25230ff20994206e2d8023fd1d4c2c74
|
[
"MIT"
] | null | null | null |
ex10.6.py
|
Dikaeinstein/Think_Python
|
370cb5af25230ff20994206e2d8023fd1d4c2c74
|
[
"MIT"
] | null | null | null |
ex10.6.py
|
Dikaeinstein/Think_Python
|
370cb5af25230ff20994206e2d8023fd1d4c2c74
|
[
"MIT"
] | null | null | null |
def is_anagram(word1, word2):
'''
Returns True if word1 is 'anagram' of word2 or False if otherwise.
word1: str
word2: str
'''
return sorted(word1) == sorted(word2)
print(is_anagram("silence", "listen"))
| 19.615385 | 70 | 0.576471 |
f8433cd21799446edb00e1ccf569de9f138f3e9c
| 3,017 |
py
|
Python
|
learning/modules/resnet/resnet_conditional.py
|
esteng/guiding-multi-step
|
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
|
[
"BSD-2-Clause"
] | null | null | null |
learning/modules/resnet/resnet_conditional.py
|
esteng/guiding-multi-step
|
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
|
[
"BSD-2-Clause"
] | null | null | null |
learning/modules/resnet/resnet_conditional.py
|
esteng/guiding-multi-step
|
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
|
[
"BSD-2-Clause"
] | null | null | null |
import torch
from torch import nn as nn
from learning.modules.blocks import ResBlock, ResBlockConditional
| 46.415385 | 86 | 0.552536 |
f8459ec6a60f2e71cf7db3476a3460a08e1783eb
| 110 |
wsgi
|
Python
|
cryptovote/cryptovote.wsgi
|
cryptovoting/cryptovote
|
b236cf031a8f9dfa5cca54ff45003313275a0fc8
|
[
"MIT"
] | 8 |
2019-05-14T02:41:34.000Z
|
2021-11-25T08:07:22.000Z
|
cryptovote/cryptovote.wsgi
|
cryptovoting/cryptovote
|
b236cf031a8f9dfa5cca54ff45003313275a0fc8
|
[
"MIT"
] | null | null | null |
cryptovote/cryptovote.wsgi
|
cryptovoting/cryptovote
|
b236cf031a8f9dfa5cca54ff45003313275a0fc8
|
[
"MIT"
] | 2 |
2019-05-14T20:20:07.000Z
|
2021-11-25T08:07:24.000Z
|
# Used for deploying on Apache with mod_wsgi
from cryptovote.app import create_app
application = create_app()
| 27.5 | 44 | 0.818182 |
f845c07a85b4945884e014911b73cc010e95c5c2
| 802 |
py
|
Python
|
problems/203_remove-linked-list-elements.py
|
okuda-seminar/review_leetcode
|
9774dbb85b836c3ebab4b24d77774ed05abb7a32
|
[
"MIT"
] | null | null | null |
problems/203_remove-linked-list-elements.py
|
okuda-seminar/review_leetcode
|
9774dbb85b836c3ebab4b24d77774ed05abb7a32
|
[
"MIT"
] | 170 |
2021-05-11T14:03:05.000Z
|
2021-11-30T14:22:52.000Z
|
problems/203_remove-linked-list-elements.py
|
ryuji0123/review_leetcode
|
9774dbb85b836c3ebab4b24d77774ed05abb7a32
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=203 lang=python3
#
# [203] Remove Linked List Elements
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
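# --- Hedged sketch: the Solution class is missing from this excerpt; below is
# the standard dummy-head approach to this problem, not necessarily the author's.
class Solution:
    def removeElements(self, head, val):
        # A dummy node simplifies removing matching nodes at the head of the list.
        dummy = ListNode(0, head)
        prev, cur = dummy, head
        while cur:
            if cur.val == val:
                prev.next = cur.next  # unlink the matching node
            else:
                prev = cur
            cur = cur.next
        return dummy.next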
# @lc code=end
| 23.588235 | 67 | 0.55985 |
f8470708904f8b5b4aa1dabc0a1785bf58a61c23
| 7,178 |
py
|
Python
|
qpricesim/model_code/QLearningAgent.py
|
ToFeWe/qpricesim
|
2d4312ed1d1356449f0c168835a0662b238a27bb
|
[
"MIT"
] | 2 |
2022-03-22T12:16:37.000Z
|
2022-03-22T12:48:46.000Z
|
qpricesim/model_code/QLearningAgent.py
|
ToFeWe/qpricesim
|
2d4312ed1d1356449f0c168835a0662b238a27bb
|
[
"MIT"
] | null | null | null |
qpricesim/model_code/QLearningAgent.py
|
ToFeWe/qpricesim
|
2d4312ed1d1356449f0c168835a0662b238a27bb
|
[
"MIT"
] | null | null | null |
"""
A module that defines the QLearning Agent for the pricing game as a class.
Note that we have a numba version (for speed) which inherits everything from
QLearningAgentBase.
"""
import numpy as np
from numba import float64
from numba import int32
from numba import njit
from numba.experimental import jitclass
from .utils_q_learning import numba_argmax
from .utils_q_learning import numba_max
spec = [
("n_actions", int32),
("n_states", int32),
("_qvalues", float64[:, :]),
("alpha", float64),
("epsilon", float64),
("discount", float64),
]
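# --- Hedged sketch: QLearningAgentBase is referenced below but missing from
# this excerpt. The constructor signature and the get_qmatrix/set_qmatrix
# methods are inferred from jitclass_to_baseclass; the update/action helpers
# are illustrative, not the project's exact implementation.
class QLearningAgentBase:
    """Plain-Python tabular Q-learning agent (serializable counterpart of the jitclass)."""

    def __init__(self, alpha, epsilon, discount, n_actions, n_states):
        self.n_actions = n_actions
        self.n_states = n_states
        self.alpha = alpha
        self.epsilon = epsilon
        self.discount = discount
        self._qvalues = np.zeros((n_states, n_actions))

    def get_qmatrix(self):
        return self._qvalues

    def set_qmatrix(self, new_matrix):
        self._qvalues = new_matrix

    def get_best_action(self, state):
        # Greedy action with respect to the current Q-matrix.
        return int(np.argmax(self._qvalues[state, :]))

    def update(self, state, action, reward, next_state):
        # One-step Q-learning update rule.
        target = reward + self.discount * np.max(self._qvalues[next_state, :])
        self._qvalues[state, action] += self.alpha * (target - self._qvalues[state, action])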
def jitclass_to_baseclass(agent_jit):
"""
A helper function to create a new QLearningAgentBase
object from the jitclass equivalent. This is needed
as we cannot serialize jitclasses in the current
numba version.
The function takes all parameters from the QLearningAgent
*agent_jit* and rewrites it to a new QLearningAgentBase
object.
Args:
agent_jit (QLearningAgent): jitclass instance of agent
Returns:
QLearningAgentBase: Serializable version of the agent
"""
agent_nojit = QLearningAgentBase(
alpha=agent_jit.alpha,
epsilon=agent_jit.epsilon,
discount=agent_jit.discount,
n_actions=agent_jit.n_actions,
n_states=agent_jit.n_states,
)
agent_nojit.set_qmatrix(new_matrix=agent_jit.get_qmatrix())
return agent_nojit
| 31.621145 | 97 | 0.633185 |
f848ba579a50c6fd3ee1c43bc3d139711769e3be
| 2,825 |
py
|
Python
|
Code/Test Code/UI Tests/move.py
|
mwyoung/Cornhole-Robot
|
830289fa30619ccec634b84b7cd81177e6b7740c
|
[
"MIT"
] | null | null | null |
Code/Test Code/UI Tests/move.py
|
mwyoung/Cornhole-Robot
|
830289fa30619ccec634b84b7cd81177e6b7740c
|
[
"MIT"
] | null | null | null |
Code/Test Code/UI Tests/move.py
|
mwyoung/Cornhole-Robot
|
830289fa30619ccec634b84b7cd81177e6b7740c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# with help from teleop_keyboard.py,
# https://github.com/ros-teleop/teleop_twist_keyboard/blob/master/teleop_twist_keyboard.py
# Graylin Trevor Jay and Austin Hendrix, BSD licensed
import roslib; #roslib.load_manifest('teleop_move')
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty
starting_msg = """Move with:
i
j k l
(or wasd, space to stop)
CTRL-C to quit
"""
movement={
'i':(1,0,0,0),
'j':(0,0,0,1),
'k':(0,0,0,-1),
'l':(-1,0,0,0),
'w':(1,0,0,0),
'a':(0,0,0,1),
's':(0,0,0,-1),
'd':(-1,0,0,0),
' ':(0,0,0,0),
}
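# --- Hedged sketch: getKey() and main() are missing from this excerpt; the
# versions below follow the teleop_twist_keyboard pattern credited in the
# header. The 'cmd_vel' topic name is an assumption.
def getKey(settings):
    # Read a single keypress from stdin without waiting for Enter.
    tty.setraw(sys.stdin.fileno())
    select.select([sys.stdin], [], [], 0)
    key = sys.stdin.read(1)
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key

def main():
    settings = termios.tcgetattr(sys.stdin)
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
    rospy.init_node('teleop_move')
    print(starting_msg)
    try:
        while not rospy.is_shutdown():
            key = getKey(settings)
            if key == '\x03':  # CTRL-C
                break
            if key not in movement:
                continue
            x, y, z, th = movement[key]
            twist = Twist()
            twist.linear.x, twist.linear.y, twist.linear.z = x, y, z
            twist.angular.z = th
            pub.publish(twist)
    finally:
        pub.publish(Twist())  # stop the robot on exit
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)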
if __name__=="__main__":
main()
| 25 | 92 | 0.524602 |
f84a7601115fccffa87d1679d8be58c1f83890a1
| 1,561 |
py
|
Python
|
stanCode_Projects/my_photoshop/shrink.py
|
wilson51678/sc-projects
|
a4b9a0c542449372181f6bd20d4ad81b87bfcb46
|
[
"MIT"
] | null | null | null |
stanCode_Projects/my_photoshop/shrink.py
|
wilson51678/sc-projects
|
a4b9a0c542449372181f6bd20d4ad81b87bfcb46
|
[
"MIT"
] | null | null | null |
stanCode_Projects/my_photoshop/shrink.py
|
wilson51678/sc-projects
|
a4b9a0c542449372181f6bd20d4ad81b87bfcb46
|
[
"MIT"
] | null | null | null |
"""
File: shrink.py
Name: Wilson Wang 2020/08/05
-------------------------------
Create a new "out" image half the width and height of the original.
Set pixels at x=0 1 2 3 in out , from x=0 2 4 6 in original,
and likewise in the y direction.
"""
from simpleimage import SimpleImage
def shrink(filename):
"""
This function should shrink the 'filename' image into a 1/2 size new image.
:param filename: img, the image of origin size
:return img: new_img, the image of half size of the origin photo
"""
img = SimpleImage(filename)
    # This step makes a blank photo half the size of the original photo
new_img = SimpleImage.blank(img.width//2,img.height//2)
for y in range(new_img.height):
for x in range(new_img.width):
            # This step takes every second pixel from the original photo: x = 0, 2, 4, 6
img_pixel = img.get_pixel(x*2,y*2)
new_img_pixel = new_img.get_pixel(x,y)
            # These three steps copy the pixel values from the original photo into the new pixel
new_img_pixel.red = img_pixel.red
new_img_pixel.green = img_pixel.green
new_img_pixel.blue = img_pixel.blue
return new_img
def main():
"""
    This program shrinks any image into a half-size photo (without using make_as_big_as).
"""
original = SimpleImage("images/poppy.png")
original.show()
after_shrink = shrink("images/poppy.png")
after_shrink.show()
if __name__ == '__main__':
main()
| 31.857143 | 95 | 0.632287 |
f84a986b558a36ee9782c5da91c77b0601aa7b43
| 15,349 |
py
|
Python
|
src/genie/libs/parser/iosxe/show_ip_dhcp.py
|
komurzak-cisco/genieparser
|
e6cd6bb133bab7260b2b82da198fd14a4dec66c7
|
[
"Apache-2.0"
] | 1 |
2021-07-26T02:56:27.000Z
|
2021-07-26T02:56:27.000Z
|
src/genie/libs/parser/iosxe/show_ip_dhcp.py
|
zhangineer/genieparser
|
d6abcb49bf6d39092d835d9490d817452920ae98
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/show_ip_dhcp.py
|
zhangineer/genieparser
|
d6abcb49bf6d39092d835d9490d817452920ae98
|
[
"Apache-2.0"
] | null | null | null |
"""
show ip dhcp database
show ip dhcp snooping database
show ip dhcp snooping database detail
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import (Schema, Any, Optional,
Or, And, Default, Use)
# Parser Utils
from genie.libs.parser.utils.common import Common
# =======================================
# Schema for 'show ip dhcp database'
# =======================================
# =======================================
# Parser for 'show ip dhcp database'
# =======================================
# ===================================================
# Schema for 'show ip dhcp snooping database'
# 'show ip dhcp snooping database detail'
# ===================================================
# ===================================================
# Parser for 'show ip dhcp snooping database'
# ===================================================
# ===================================================
# Parser for 'show ip dhcp snooping database detail'
# ===================================================
| 37.436585 | 133 | 0.485699 |
f84ba2d7e5aa592c0ac62dbc711d229b2f13adeb
| 848 |
py
|
Python
|
vpc_hyp2/Createservers.py
|
dhanraj-vedanth/IaaS_VPC_CDN
|
262dbc7db63d5e76398dadc8015256fb37986e36
|
[
"MIT"
] | null | null | null |
vpc_hyp2/Createservers.py
|
dhanraj-vedanth/IaaS_VPC_CDN
|
262dbc7db63d5e76398dadc8015256fb37986e36
|
[
"MIT"
] | null | null | null |
vpc_hyp2/Createservers.py
|
dhanraj-vedanth/IaaS_VPC_CDN
|
262dbc7db63d5e76398dadc8015256fb37986e36
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
import ipaddress
import paramiko
br=sys.argv[1]
r=sys.argv[2]
IP=sys.argv[3]
func_createcont(br,r,IP)
| 33.92 | 78 | 0.650943 |
f84d7afc084777032cfb27a9f3d492736584d51d
| 1,051 |
py
|
Python
|
backend/flaskr/__init__.py
|
DakyungAndEunji/2021-ICE-Capstone-Project
|
71761bf66bd170eae48a8084331ed1d00f9c184b
|
[
"MIT"
] | 1 |
2021-05-11T04:08:58.000Z
|
2021-05-11T04:08:58.000Z
|
backend/flaskr/__init__.py
|
DakyungAndEunji/2021-ICE-Capstone-Project
|
71761bf66bd170eae48a8084331ed1d00f9c184b
|
[
"MIT"
] | 11 |
2021-04-06T15:22:47.000Z
|
2021-06-01T05:13:43.000Z
|
backend/flaskr/__init__.py
|
DakyungAndEunji/2021-ICE-Capstone-Project
|
71761bf66bd170eae48a8084331ed1d00f9c184b
|
[
"MIT"
] | null | null | null |
### flaskr/__init__.py
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
| 26.948718 | 104 | 0.698382 |
f84fd6a36061acc80024ef6237230dcd9e8feabc
| 7,228 |
py
|
Python
|
backend/ec2.py
|
yubinhong/AutoAws
|
92a3be4ba4ed582536af9eeaf5b5fbd5cee1035d
|
[
"MIT"
] | 1 |
2020-02-21T07:40:46.000Z
|
2020-02-21T07:40:46.000Z
|
backend/ec2.py
|
yubinhong/AutoAws
|
92a3be4ba4ed582536af9eeaf5b5fbd5cee1035d
|
[
"MIT"
] | null | null | null |
backend/ec2.py
|
yubinhong/AutoAws
|
92a3be4ba4ed582536af9eeaf5b5fbd5cee1035d
|
[
"MIT"
] | null | null | null |
import boto3
import time
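# --- Hedged sketch: the AwsEc2 class is missing from this excerpt. The
# constructor arguments are assumed to be an access key id and secret key, and
# get_instance_by_resource is assumed to filter instances by their Name tag.
class AwsEc2(object):
    def __init__(self, access_key_id, secret_access_key, region="us-east-1"):
        self.ec2 = boto3.resource(
            "ec2",
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key,
            region_name=region,
        )

    def get_instance_by_resource(self, name):
        # Return the instances whose Name tag matches `name`.
        return list(self.ec2.instances.filter(
            Filters=[{"Name": "tag:Name", "Values": [name]}]
        ))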
if __name__ == "__main__":
ec2 = AwsEc2("", "")
res = ec2.get_instance_by_resource('xxxxxx')
for i in res:
print(i.placement)
| 34.419048 | 119 | 0.445075 |
f851380879e61799e28a7ffd91239a32f370bf71
| 2,299 |
py
|
Python
|
control/voiceControl.py
|
Lluxent/CorporateClashUtility
|
36c5f724fb8e0050aab2b3a0bfb02c5b5d0c6272
|
[
"MIT"
] | 2 |
2021-03-08T02:30:58.000Z
|
2021-03-17T12:57:33.000Z
|
control/voiceControl.py
|
Lluxent/CorporateClashUtility
|
36c5f724fb8e0050aab2b3a0bfb02c5b5d0c6272
|
[
"MIT"
] | null | null | null |
control/voiceControl.py
|
Lluxent/CorporateClashUtility
|
36c5f724fb8e0050aab2b3a0bfb02c5b5d0c6272
|
[
"MIT"
] | null | null | null |
import control
import speech_recognition as sr
def recognize_speech_from_mic(recognizer, microphone):
"""Transcribe speech from recorded from `microphone`.
Returns a dictionary with three keys:
"success": a boolean indicating whether or not the API request was
successful
"error": `None` if no error occured, otherwise a string containing
an error message if the API could not be reached or
speech was unrecognizable
"transcription": `None` if speech could not be transcribed,
otherwise a string containing the transcribed text
"""
# check that recognizer and microphone arguments are appropriate type
if not isinstance(recognizer, sr.Recognizer):
raise TypeError("`recognizer` must be `Recognizer` instance")
if not isinstance(microphone, sr.Microphone):
raise TypeError("`microphone` must be `Microphone` instance")
# adjust the recognizer sensitivity to ambient noise and record audio
# from the microphone
with microphone as source:
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
# set up the response object
response = {
"success" : True,
"error" : None,
"transcription" : None
}
# try recognizing the speech in the recording
# if a RequestError or UnknownValueError exception is caught, update the response object accordingly
try:
response["transcription"] = recognizer.recognize_google(audio)
except sr.RequestError:
# API was unreachable or unresponsive
response["success"] = False
response["error"] = "API unavailable"
except sr.UnknownValueError:
# speech was unintelligible
response["error"] = "Unable to recognize speech"
return response
r = sr.Recognizer()
m = sr.Microphone()
while(True):
while(True):
print('Listening... ')
arg = recognize_speech_from_mic(r, m)
if arg["transcription"]:
break
if not arg["success"]:
break
if arg["error"]:
print('Error! {}'.format(arg["error"]))
pass
print('Heard: {}'.format(arg["transcription"]))
control.doAction(str.lower(arg["transcription"]))
| 33.808824 | 104 | 0.653763 |
f85295b6cbccfde4504d51121948d6ed5ff3e3c4
| 6,721 |
py
|
Python
|
lookatweb/rules/objects.py
|
ivbeg/lookatweb
|
b98e3ebd29c00e2f718c3392bb31b7202aa82a99
|
[
"BSD-3-Clause"
] | 2 |
2018-01-18T13:22:29.000Z
|
2018-02-03T13:10:20.000Z
|
lookatweb/rules/objects.py
|
ivbeg/lookatweb
|
b98e3ebd29c00e2f718c3392bb31b7202aa82a99
|
[
"BSD-3-Clause"
] | null | null | null |
lookatweb/rules/objects.py
|
ivbeg/lookatweb
|
b98e3ebd29c00e2f718c3392bb31b7202aa82a99
|
[
"BSD-3-Clause"
] | null | null | null |
from .consts import *
# Object matching by classid
OBJECTS_CLSID_RULES = [
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:D27CDB6E-AE6D-11cf-96B8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:d27cdb6e-ae6d-11cf-96b8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:-D27CDB6E-AE6D-11cf-96B8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:22D6F312-B0F6-11D0-94AB-0080C74C7E95',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:6BF52A52-394A-11D3-B153-00C04F79FAA6',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'CLSID:CFCDAA03-8BE4-11cf-B84B-0020AFBBCCFA',
'entities' : [
{'name' : 'web:tech:activex/realplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:CFCDAA03-8BE4-11cf-B84B-0020AFBBCCFA',
'entities' : [
{'name' : 'web:tech:activex/realplayer'}
]
},
]
# match object tags by type
OBJECTS_TYPE_RULES = [
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-silverlight-2',
'entities' : [
{'name' : 'web:tech/silverlight'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-shockwave-flash',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-oleobject',
'entities' : [
{'name' : 'web:tech/activex'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'image/svg+xml',
'entities' : [
{'name' : 'web:tech/svg'}
]
},
]
# match object tags by data
OBJECTS_DATA_RULES = [
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.yandex\.net/i/time/clock\.swf',
'entities' : [
{'name' : 'web:widgets:clock/yandexclock'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vimeo\.com',
'entities' : [
{'name' : 'web:media:video/vimeo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.youtube\.com',
'entities' : [
{'name' : 'web:media:video/youtube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://cdn\.last\.fm/widgets/chart',
'entities' : [
{'name' : 'web:widgets:audio/lastfm'}
]
},
]
# match object tags by embed src
EMBED_SRC_RULES = [
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.mail\.ru/r/video2/player_v2\.swf',
'entities' : [
{'name' : 'web:media:video/mailru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://flv\.video\.yandex\.ru',
'entities' : [
{'name' : 'web:media:video/yandex'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.gismeteo\.ru/flash',
'entities' : [
{'name' : 'web:widgets:meteo/gismeteo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.clocklink\.com/clocks/',
'entities' : [
{'name' : 'web:widgets:time/clocklink'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'http://iii.ru/static/Vishnu.swf',
'entities' : [
{'name' : 'web:widgets:chat/iiiru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://[a-z0-9]{1,3}\.videos\.sapo\.pt/play',
'entities' : [
{'name' : 'web:media:video/sapovideos'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'http://pub.tvigle.ru/swf/tvigle_single_v2.swf',
'entities' : [
{'name' : 'web:media:video/twigle'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://rpod\.ru/i/b/listen_240x400_01/core\.swf',
'entities' : [
{'name' : 'web:media:audio/rpodru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vision\.rambler\.ru/i/e\.swf',
'entities' : [
{'name' : 'web:media:video/ramblervision'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://pics\.smotri\.com/scrubber_custom8\.swf',
'entities' : [
{'name' : 'web:media:video/smotricom'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.russia\.ru/player/main\.swf',
'entities' : [
{'name' : 'web:media:video/russiaru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://video\.google\.(com|ru|ca|de)/googleplayer.swf',
'entities' : [
{'name' : 'web:media:video/googlevideo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.youtube\.com/v/',
'entities' : [
{'name' : 'web:media:video/youtube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^/bitrix/templates/',
'entities' : [
{'name' : 'web:cms/bitrix'},
{'name' : 'web:tech:lang/php'},
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^/bitrix/components/',
'entities' : [
{'name' : 'web:cms/bitrix'},
{'name' : 'web:tech:lang/php'},
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://developer\.truveo\.com/apps/listWidget',
'entities' : [
{'name' : 'web:media:video/truveo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://pics\.rbc\.ru/informer',
'entities' : [
{'name' : 'web:widgets:fin/rbcinformer'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://video\.rutube\.ru',
'entities' : [
{'name' : 'web:media:video/rutube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://static\.twitter\.com/flash/widgets/profile/TwitterWidget\.swf',
'entities' : [
{'name' : 'web:widgets:blog/twitter'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vimeo\.com/moogaloop.swf',
'entities' : [
{'name' : 'web:media:video/vimeo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www.1tv.ru/(n|p)video',
'entities' : [
{'name' : 'web:media:video/1tvru'}
]
},
]
| 30.139013 | 112 | 0.494867 |
f856a06399d0483aa5762d750435935c90b3dd55
| 6,020 |
py
|
Python
|
src/failprint/cli.py
|
pawamoy/woof
|
5c8eccfe5c1343b5a399b5794c486b3c0de67c78
|
[
"0BSD"
] | 6 |
2020-10-14T07:22:31.000Z
|
2022-02-13T23:17:56.000Z
|
src/failprint/cli.py
|
pawamoy/woof
|
5c8eccfe5c1343b5a399b5794c486b3c0de67c78
|
[
"0BSD"
] | 10 |
2020-04-29T12:29:43.000Z
|
2021-07-31T10:35:36.000Z
|
src/failprint/cli.py
|
pawamoy/woof
|
5c8eccfe5c1343b5a399b5794c486b3c0de67c78
|
[
"0BSD"
] | 1 |
2021-08-07T03:23:41.000Z
|
2021-08-07T03:23:41.000Z
|
# Why does this file exist, and why not put this in `__main__`?
#
# You might be tempted to import things from `__main__` later,
# but that will cause problems: the code will get executed twice:
#
# - When you run `python -m failprint` python will execute
# `__main__.py` as a script. That means there won't be any
# `failprint.__main__` in `sys.modules`.
# - When you import `__main__` it will get executed again (as a module) because
# there's no `failprint.__main__` in `sys.modules`.
"""Module that contains the command line application."""
import argparse
from typing import List, Optional, Sequence
from failprint.capture import Capture
from failprint.formats import accept_custom_format, formats
from failprint.runners import run
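# --- Hedged sketch: the ArgParser class used below is missing from this
# excerpt. A plausible reading of add_bool_argument is a pair of mutually
# exclusive on/off flags writing to the same destination; the exact behavior
# in failprint may differ.
class ArgParser(argparse.ArgumentParser):
    """Argument parser with paired truthy/falsy boolean flags."""

    def add_bool_argument(self, truthy, falsy, dest, default=None, truthy_help="", falsy_help=""):
        group = self.add_mutually_exclusive_group()
        group.add_argument(*truthy, dest=dest, action="store_true", default=default, help=truthy_help)
        group.add_argument(*falsy, dest=dest, action="store_false", default=default, help=falsy_help)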
def add_flags(parser, set_defaults=True) -> ArgParser:
"""
Add some boolean flags to the parser.
We made this method separate and public
for its use in [duty](https://github.com/pawamoy/duty).
Arguments:
parser: The parser to add flags to.
set_defaults: Whether to set default values on arguments.
Returns:
The augmented parser.
"""
# IMPORTANT: the arguments destinations should match
# the parameters names of the failprint.runners.run function.
# As long as names are consistent between the two,
# it's very easy to pass CLI args to the function,
# and it also allows to avoid duplicating the parser arguments
# in dependent projects like duty (https://github.com/pawamoy/duty) :)
parser.add_argument(
"-c",
"--capture",
choices=list(Capture),
type=Capture,
help="Which output to capture. Colors are supported with 'both' only, unless the command has a 'force color' option.",
)
parser.add_argument(
"-f",
"--fmt",
"--format",
dest="fmt",
choices=formats.keys(),
type=accept_custom_format,
default=None,
help="Output format. Pass your own Jinja2 template as a string with '-f custom=TEMPLATE'. "
"Available variables: command, title (command or title passed with -t), code (exit status), "
"success (boolean), failure (boolean), number (command number passed with -n), "
"output (command output), nofail (boolean), quiet (boolean), silent (boolean). "
"Available filters: indent (textwrap.indent).",
)
parser.add_bool_argument(
["-y", "--pty"],
["-Y", "--no-pty"],
dest="pty",
default=True if set_defaults else None,
truthy_help="Enable the use of a pseudo-terminal. PTY doesn't allow programs to use standard input.",
falsy_help="Disable the use of a pseudo-terminal. PTY doesn't allow programs to use standard input.",
)
parser.add_bool_argument(
["-p", "--progress"],
["-P", "--no-progress"],
dest="progress",
default=True if set_defaults else None,
truthy_help="Print progress while running a command.",
falsy_help="Don't print progress while running a command.",
)
parser.add_bool_argument(
["-q", "--quiet"],
["-Q", "--no-quiet"],
dest="quiet",
default=False if set_defaults else None,
truthy_help="Don't print the command output, even if it failed.",
falsy_help="Print the command output when it fails.",
)
parser.add_bool_argument(
["-s", "--silent"],
["-S", "--no-silent"],
dest="silent",
default=False if set_defaults else None,
truthy_help="Don't print anything.",
falsy_help="Print output as usual.",
)
parser.add_bool_argument(
["-z", "--zero", "--nofail"],
["-Z", "--no-zero", "--strict"],
dest="nofail",
default=False if set_defaults else None,
truthy_help="Don't fail. Always return a success (0) exit code.",
falsy_help="Return the original exit code.",
)
return parser
def get_parser() -> ArgParser:
"""
Return the CLI argument parser.
Returns:
An argparse parser.
"""
parser = add_flags(ArgParser(prog="failprint"))
parser.add_argument("-n", "--number", type=int, default=1, help="Command number. Useful for the 'tap' format.")
parser.add_argument("-t", "--title", help="Command title. Default is the command itself.")
parser.add_argument("cmd", metavar="COMMAND", nargs="+")
return parser
def main(args: Optional[List[str]] = None) -> int:
"""
Run the main program.
This function is executed when you type `failprint` or `python -m failprint`.
Arguments:
args: Arguments passed from the command line.
Returns:
An exit code.
"""
parser = get_parser()
opts = parser.parse_args(args).__dict__.items() # noqa: WPS609
return run(**{_: value for _, value in opts if value is not None}).code
| 36.707317 | 126 | 0.635382 |
f858848401df27fd04f2c1792b618ab879328af0
| 1,112 |
py
|
Python
|
siqbal/siqbal/doctype/item_label/item_label.py
|
smehata/siqbal
|
8b6a21fb63c050237593c49757065198c0e2c54a
|
[
"MIT"
] | 1 |
2021-08-07T12:48:02.000Z
|
2021-08-07T12:48:02.000Z
|
siqbal/siqbal/doctype/item_label/item_label.py
|
smehata/siqbal
|
8b6a21fb63c050237593c49757065198c0e2c54a
|
[
"MIT"
] | null | null | null |
siqbal/siqbal/doctype/item_label/item_label.py
|
smehata/siqbal
|
8b6a21fb63c050237593c49757065198c0e2c54a
|
[
"MIT"
] | 4 |
2021-01-16T06:14:58.000Z
|
2022-02-07T06:36:41.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, RC and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
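# --- Hedged sketch: the DocType controller class is missing from this excerpt;
# the stub below is the standard boilerplate Frappe generates for a DocType.
class ItemLabel(Document):
    pass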
| 37.066667 | 116 | 0.732914 |
f859b964e5f9c3a181c35199baa7176223613982
| 1,308 |
py
|
Python
|
www.py
|
MurphyWan/Python_Flask
|
7ef61c8242b4edf05a1ce8c688564e7895017a76
|
[
"MIT"
] | 1 |
2019-01-05T12:35:51.000Z
|
2019-01-05T12:35:51.000Z
|
www.py
|
MurphyWan/Python_Flask
|
7ef61c8242b4edf05a1ce8c688564e7895017a76
|
[
"MIT"
] | null | null | null |
www.py
|
MurphyWan/Python_Flask
|
7ef61c8242b4edf05a1ce8c688564e7895017a76
|
[
"MIT"
] | null | null | null |
# coding:utf-8
# author:MurphyWan
# www.py
""" controller.py/index.py
from flask import Blueprint
route_index = Blueprint('index_page', __name__)
@route_index.route("/")
def index():
return "Hello World"
"""
from application import app
'''
'''
from web.interceptors.Authinterceptor import *
'''
url
'''
from web.controllers.index import route_index
from web.controllers.user.User import route_user
from web.controllers.static import route_static
from web.controllers.account.Account import route_account  # account module
from web.controllers.food.Food import route_food  # food module
from web.controllers.member.Member import route_member  # member module
from web.controllers.finance.Finance import route_finance  # finance module
from web.controllers.stat.Stat import route_stat  # statistics module
# Register the blueprints on the Flask app
app.register_blueprint(route_index, url_prefix='/')
# manager.pywww.pywww
app.register_blueprint(route_user, url_prefix='/user')
app.register_blueprint(route_static, url_prefix='/static')
app.register_blueprint(route_account, url_prefix='/account')
app.register_blueprint(route_food, url_prefix='/food')
app.register_blueprint(route_member, url_prefix='/member')
app.register_blueprint(route_finance, url_prefix='/finance')
app.register_blueprint(route_stat, url_prefix='/stat')
| 29.727273 | 65 | 0.798165 |
f85a24e0d9a829e5ba4097a173e5c180ffe2795f
| 1,410 |
py
|
Python
|
Summarizing-Data-with-statistics-/code.py
|
Tushar23dhongade/ga-learner-dsmp-repo
|
cf5550a36d2f5d3a91940d7ac8a245d5040cd9d1
|
[
"MIT"
] | null | null | null |
Summarizing-Data-with-statistics-/code.py
|
Tushar23dhongade/ga-learner-dsmp-repo
|
cf5550a36d2f5d3a91940d7ac8a245d5040cd9d1
|
[
"MIT"
] | null | null | null |
Summarizing-Data-with-statistics-/code.py
|
Tushar23dhongade/ga-learner-dsmp-repo
|
cf5550a36d2f5d3a91940d7ac8a245d5040cd9d1
|
[
"MIT"
] | null | null | null |
# --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#path of the data file- path
data=pd.read_csv(path)
data["Gender"].replace("-","Agender",inplace=True)
gender_count=data.Gender.value_counts()
gender_count.plot(kind="bar")
#Code starts here
# --------------
#Code starts here
alignment=data.Alignment.value_counts()
plt.pie(alignment,labels=["good","bad","newutral"])
# --------------
#Code starts here
sc_df=data[["Strength","Combat"]]
sc_covariance=sc_df.cov().iloc[0,1]
sc_strength=sc_df.Strength.std()
sc_combat=sc_df.Combat.std()
sc_pearson=sc_covariance/(sc_strength*sc_combat)
print(sc_pearson)
ic_df=data[["Intelligence","Combat"]]
ic_covariance=ic_df.cov().iloc[0,1]
ic_intelligence=ic_df.Intelligence.std()
ic_combat=ic_df.Combat.std()
ic_pearson=ic_covariance/(ic_intelligence*ic_combat)
print(ic_pearson)
# --------------
#Code starts here
total_high=data.Total.quantile(0.99)
super_best=data[data.Total>total_high]
super_best_names=list(super_best.Name)
print(super_best_names)
# --------------
#Code starts here
Intelligence, ax_1 = plt.subplots()
ax_1.boxplot(data.Intelligence)
ax_1.set_title('Intelligence')
Speed, ax_2 = plt.subplots()
ax_2.boxplot(data.Speed)
ax_2.set_title('Speed')
Power, ax_3 = plt.subplots()
ax_3.boxplot(data.Power)
ax_3.set_title('Power')
| 20.434783 | 53 | 0.698582 |
f85c11db5b31e7e4088a63d0697d91e4986e3c85
| 6,962 |
py
|
Python
|
soc/python/checkDB.py
|
idea-fasoc/fasoc
|
5a1fc8cf980b24a48b17f4447f13fb50d49e366a
|
[
"MIT"
] | 48 |
2019-09-16T09:49:54.000Z
|
2022-02-09T20:59:10.000Z
|
soc/python/checkDB.py
|
idea-fasoc/fasoc
|
5a1fc8cf980b24a48b17f4447f13fb50d49e366a
|
[
"MIT"
] | 18 |
2019-10-15T04:17:35.000Z
|
2021-05-25T00:12:52.000Z
|
soc/python/checkDB.py
|
idea-fasoc/fasoc
|
5a1fc8cf980b24a48b17f4447f13fb50d49e366a
|
[
"MIT"
] | 8 |
2019-10-15T17:27:41.000Z
|
2022-01-26T20:42:07.000Z
|
#!/usr/bin/env python3
#MIT License
#Copyright (c) 2018 The University of Michigan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import shutil
import os
import json # json parsing
import zipfile
import sys
from modifyDBFiles import modifyDBFiles
| 44.063291 | 418 | 0.668342 |
f85e27ad10e7814b11be2c93c0c4dca76deac4ea
| 2,222 |
py
|
Python
|
Piquant/Debug/script/matlplotlib_pyplot实操代码.py
|
QuantPengPeng/Piquant
|
88047831a3ce4eb5b67fc68c752243084ba90199
|
[
"MIT"
] | 9 |
2019-04-07T06:17:50.000Z
|
2021-07-11T14:31:36.000Z
|
Piquant/Debug/script/matlplotlib_pyplot实操代码.py
|
QuantPengPeng/Piquant
|
88047831a3ce4eb5b67fc68c752243084ba90199
|
[
"MIT"
] | 1 |
2019-05-17T01:57:07.000Z
|
2019-11-19T01:57:05.000Z
|
Piquant/Debug/script/matlplotlib_pyplot实操代码.py
|
QuantPengPeng/Piquant
|
88047831a3ce4eb5b67fc68c752243084ba90199
|
[
"MIT"
] | 6 |
2019-04-15T07:17:26.000Z
|
2019-08-04T02:55:36.000Z
|
# coding: utf-8
# In[35]:
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
main_image=plt.figure(figsize=(10,10))
subplots_adjust(hspace=0.3,wspace=0.3)  # adjust spacing between the subplots
# 1 - sine wave line plot
x_0=np.linspace(0,2*np.pi,20)  # X values from 0 to 2*pi
sub_image_1=plt.subplot(2,2,1)
plt.xlabel('X value')
plt.ylabel('Sin value')
plt.grid(True)
sub_image_1.plot(x_0, np.sin(x_0), 'r--o',label='Sin(x)')
sub_image_1.legend()  # show the legend
sub_image_1.annotate('sin wave', xy=(3,0.25), xytext=(4,0.5), arrowprops=dict(facecolor='black',shrink=0.05))  # annotate with an arrow
sub_image_1.set_title('Sin Waves')
# 2 - cos and sin curves on one subplot
x_1=np.linspace(0,2*np.pi,20)
sub_image_2=plt.subplot(2,2,2)
plt.xlabel('X value')
plt.ylabel('Cos and Sin value')
plt.grid(True)
sub_image_2.plot(x_1, np.cos(x_1), color='blue', linestyle='--',linewidth=1, marker='o', markerfacecolor='red', markersize='6', label='Cos(x)')
sub_image_2.plot(x_1, np.sin(x_1), color='green', linestyle='-.', linewidth=3, marker='^', markerfacecolor='yellow', markersize='8', label='Sin(x)')
sub_image_2.legend()
sub_image_2.set_title('Cos and Sin Waves')
# 3 - frequency histogram
bins_count=10
mu,sigma=100,20
x_hist=mu+sigma*np.random.randn(1000,1)  # randn draws 1000 standard-normal samples
sub_image_3=plt.subplot(2,2,3)
plt.xlabel('value')
plt.ylabel('count')
plt.grid(False)
tuple_return=sub_image_3.hist(x_hist, bins=bins_count, facecolor='red', alpha=0.8, edgecolor='black',normed=0)  # normed=0 plots raw counts; normed=1 would plot a normalised density
sub_image_3.set_title('Frequency Histogram')
plt.xlim((floor(x_hist.min()),ceil(x_hist.max())))
bar_width=(x_hist.max()-x_hist.min())/bins_count
plt.xticks(np.arange(floor(x_hist.min()),ceil(x_hist.max()),round(bar_width)))  # tick positions spaced by the bin width
for i in range(bins_count):
sub_image_3.text(x_hist.min()+(bar_width*i)+(bar_width/2), tuple_return[0][i], str(tuple_return[0][i]), horizontalalignment='center', verticalalignment='bottom')
# 4 - piecewise function
x_part_1=np.linspace(-10,-1,10)  # left branch of the piecewise function
x_part_2=np.linspace(0,10,11)
sub_image_4=plt.subplot(2,2,4)
plt.xlabel('X value')
plt.ylabel('Y value')
plt.grid(False)
sub_image_4.plot(x_part_1,x_part_1*2+1,'b--o',label='y=2x+1')
sub_image_4.plot(x_part_2,x_part_2**2,'r--o',label='y=x^2')
sub_image_4.legend()
sub_image_4.set_title('PieceWise Function')
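# Editor's addition (illustrative, not in the original script): also write the
# composed figure to disk; the filename and dpi are arbitrary example values.
main_image.savefig('pyplot_demo.png', dpi=150, bbox_inches='tight')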
# display the composed figure
plt.show()
| 32.676471 | 165 | 0.729973 |
f85ead752c9700ddc5fb73af13b5441235631493
| 2,190 |
py
|
Python
|
gencode/python/udmi/schema/event_audit.py
|
johnrandolph/udmi
|
5e9de32fc71de8d006cda2eba4d3372eaf24c7c0
|
[
"Apache-2.0"
] | 1 |
2022-02-24T22:57:37.000Z
|
2022-02-24T22:57:37.000Z
|
gencode/python/udmi/schema/event_audit.py
|
johnrandolph/udmi
|
5e9de32fc71de8d006cda2eba4d3372eaf24c7c0
|
[
"Apache-2.0"
] | 5 |
2022-02-24T21:32:24.000Z
|
2022-03-23T15:52:25.000Z
|
gencode/python/udmi/schema/event_audit.py
|
johnrandolph/udmi
|
5e9de32fc71de8d006cda2eba4d3372eaf24c7c0
|
[
"Apache-2.0"
] | null | null | null |
"""Generated class for event_audit.json"""
from .common import Entry
| 23.548387 | 77 | 0.648402 |
f85f1ff5fdc55f6eaa86305ff1243afdf2c3c231
| 7,624 |
py
|
Python
|
colour/models/rgb.py
|
canavandl/colour
|
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
|
[
"BSD-3-Clause"
] | 1 |
2019-06-27T11:32:48.000Z
|
2019-06-27T11:32:48.000Z
|
colour/models/rgb.py
|
canavandl/colour
|
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/rgb.py
|
canavandl/colour
|
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RGB Colourspace Transformations
===============================
Defines the *RGB* colourspace transformations:
- :func:`XYZ_to_RGB`
- :func:`RGB_to_XYZ`
- :func:`RGB_to_RGB`
See Also
--------
`RGB Colourspaces IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb>`_ # noqa
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.models import xy_to_XYZ
from colour.adaptation import chromatic_adaptation_matrix
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['XYZ_to_RGB',
'RGB_to_XYZ',
'RGB_to_RGB']
def XYZ_to_RGB(XYZ,
illuminant_XYZ,
illuminant_RGB,
to_RGB,
chromatic_adaptation_method='CAT02',
transfer_function=None):
"""
Converts from *CIE XYZ* colourspace to *RGB* colourspace using given
*CIE XYZ* colourspace matrix, *illuminants*, *chromatic adaptation* method,
*normalised primary matrix* and *transfer function*.
Parameters
----------
XYZ : array_like, (3,)
*CIE XYZ* colourspace matrix.
illuminant_XYZ : array_like
*CIE XYZ* colourspace *illuminant* *xy* chromaticity coordinates.
illuminant_RGB : array_like
*RGB* colourspace *illuminant* *xy* chromaticity coordinates.
to_RGB : array_like, (3, 3)
*Normalised primary matrix*.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
transfer_function : object, optional
*Transfer function*.
Returns
-------
ndarray, (3,)
*RGB* colourspace matrix.
Notes
-----
- Input *CIE XYZ* colourspace matrix is in domain [0, 1].
- Input *illuminant_XYZ* *xy* chromaticity coordinates are in domain
[0, 1].
- Input *illuminant_RGB* *xy* chromaticity coordinates are in domain
[0, 1].
- Output *RGB* colourspace matrix is in domain [0, 1].
Examples
--------
>>> XYZ = np.array([0.1151847498, 0.1008, 0.0508937252])
>>> illuminant_XYZ = (0.34567, 0.35850)
>>> illuminant_RGB = (0.31271, 0.32902)
>>> chromatic_adaptation_method = 'Bradford'
>>> to_RGB = np.array([
... [3.24100326, -1.53739899, -0.49861587],
... [-0.96922426, 1.87592999, 0.04155422],
... [0.05563942, -0.2040112, 1.05714897]])
>>> XYZ_to_RGB(
... XYZ,
... illuminant_XYZ,
... illuminant_RGB,
... to_RGB,
... chromatic_adaptation_method) # doctest: +ELLIPSIS
array([ 0.1730350..., 0.0821103..., 0.0567249...])
"""
np.array([
[3.24100326, -1.53739899, -0.49861587],
[-0.96922426, 1.87592999, 0.04155422],
[0.05563942, -0.2040112, 1.05714897]])
cat = chromatic_adaptation_matrix(xy_to_XYZ(illuminant_XYZ),
xy_to_XYZ(illuminant_RGB),
method=chromatic_adaptation_method)
adapted_XYZ = np.dot(cat, XYZ)
RGB = np.dot(to_RGB.reshape((3, 3)), adapted_XYZ.reshape((3, 1)))
if transfer_function is not None:
RGB = np.array([transfer_function(x) for x in np.ravel(RGB)])
return np.ravel(RGB)
def RGB_to_XYZ(RGB,
illuminant_RGB,
illuminant_XYZ,
to_XYZ,
chromatic_adaptation_method='CAT02',
inverse_transfer_function=None):
"""
Converts from *RGB* colourspace to *CIE XYZ* colourspace using given
*RGB* colourspace matrix, *illuminants*, *chromatic adaptation* method,
*normalised primary matrix* and *transfer function*.
Parameters
----------
RGB : array_like, (3,)
*RGB* colourspace matrix.
illuminant_RGB : array_like
*RGB* colourspace *illuminant* chromaticity coordinates.
illuminant_XYZ : array_like
*CIE XYZ* colourspace *illuminant* chromaticity coordinates.
to_XYZ : array_like, (3, 3)
*Normalised primary matrix*.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
inverse_transfer_function : object, optional
*Inverse transfer function*.
Returns
-------
ndarray, (3,)
*CIE XYZ* colourspace matrix.
Notes
-----
- Input *RGB* colourspace matrix is in domain [0, 1].
- Input *illuminant_RGB* *xy* chromaticity coordinates are in domain
[0, 1].
- Input *illuminant_XYZ* *xy* chromaticity coordinates are in domain
[0, 1].
- Output *CIE XYZ* colourspace matrix is in domain [0, 1].
Examples
--------
>>> RGB = np.array([0.17303501, 0.08211033, 0.05672498])
>>> illuminant_RGB = (0.31271, 0.32902)
>>> illuminant_XYZ = (0.34567, 0.35850)
>>> chromatic_adaptation_method = 'Bradford'
>>> to_XYZ = np.array([
... [0.41238656, 0.35759149, 0.18045049],
... [0.21263682, 0.71518298, 0.0721802],
... [0.01933062, 0.11919716, 0.95037259]])
>>> RGB_to_XYZ(
... RGB,
... illuminant_RGB,
... illuminant_XYZ,
... to_XYZ,
... chromatic_adaptation_method) # doctest: +ELLIPSIS
array([ 0.1151847..., 0.1008 , 0.0508937...])
"""
if inverse_transfer_function is not None:
RGB = np.array([inverse_transfer_function(x)
for x in np.ravel(RGB)])
XYZ = np.dot(to_XYZ.reshape((3, 3)), RGB.reshape((3, 1)))
cat = chromatic_adaptation_matrix(
xy_to_XYZ(illuminant_RGB),
xy_to_XYZ(illuminant_XYZ),
method=chromatic_adaptation_method)
adapted_XYZ = np.dot(cat, XYZ.reshape((3, 1)))
return np.ravel(adapted_XYZ)
def RGB_to_RGB(RGB,
input_colourspace,
output_colourspace,
chromatic_adaptation_method='CAT02'):
"""
Converts from given input *RGB* colourspace to output *RGB* colourspace
using given *chromatic adaptation* method.
Parameters
----------
RGB : array_like, (3,)
*RGB* colourspace matrix.
input_colourspace : RGB_Colourspace
*RGB* input colourspace.
output_colourspace : RGB_Colourspace
*RGB* output colourspace.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
    Returns
    -------
    ndarray, (3,)
*RGB* colourspace matrix.
Notes
-----
- *RGB* colourspace matrices are in domain [0, 1].
Examples
--------
>>> from colour import sRGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE
>>> RGB = np.array([0.35521588, 0.41, 0.24177934])
>>> RGB_to_RGB(
... RGB,
... sRGB_COLOURSPACE,
... PROPHOTO_RGB_COLOURSPACE) # doctest: +ELLIPSIS
array([ 0.3579334..., 0.4007138..., 0.2615704...])
"""
cat = chromatic_adaptation_matrix(
xy_to_XYZ(input_colourspace.whitepoint),
xy_to_XYZ(output_colourspace.whitepoint),
chromatic_adaptation_method)
trs_matrix = np.dot(output_colourspace.to_RGB,
np.dot(cat, input_colourspace.to_XYZ))
return np.dot(trs_matrix, RGB)
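# Editor's usage sketch (not part of the original module): with matched
# normalised primary matrices (`to_XYZ` and `to_RGB` being inverses of one
# another) and the same illuminants, RGB -> XYZ -> RGB should round-trip to
# within floating point precision:
# >>> XYZ = RGB_to_XYZ(RGB, illuminant_RGB, illuminant_XYZ, to_XYZ)
# >>> np.allclose(RGB, XYZ_to_RGB(XYZ, illuminant_XYZ, illuminant_RGB, to_RGB))
# True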
| 31.766667 | 115 | 0.613458 |
f85f4b7c7b491177a0f091a1844ac24655fff102
| 1,768 |
py
|
Python
|
tests/assign_folds_test.py
|
turku-rad-ai/pe-detection
|
d9b49800de45a40030db72db65f4806b23d97a63
|
[
"Apache-2.0"
] | null | null | null |
tests/assign_folds_test.py
|
turku-rad-ai/pe-detection
|
d9b49800de45a40030db72db65f4806b23d97a63
|
[
"Apache-2.0"
] | null | null | null |
tests/assign_folds_test.py
|
turku-rad-ai/pe-detection
|
d9b49800de45a40030db72db65f4806b23d97a63
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
import pandas as pd
import pytest
from preprocessing.assign_folds import assign_folds
testdata = [
[
[
"patient1",
"patient2",
"patient3",
"patient4",
"patient5",
"patient6",
"patient7",
"patient8",
"patient9",
"patient1", # second 1
"patient3", # second 3
"patient10",
],
[
"image1.dcm",
"image2.dcm",
"image3.dcm",
"image4.dcm",
"image5.dcm",
"image6.dcm",
"image7.dcm",
"image8.dcm",
"image9.dcm",
"image10.dcm",
"image11.dcm",
"image12.dcm",
],
[1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1],
3,
]
]
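# Editor's sketch (hypothetical; the actual test body is truncated in this
# excerpt and the signature of `assign_folds` is assumed, not confirmed):
# @pytest.mark.parametrize("patient_ids, image_names, labels, n_folds", testdata)
# def test_assign_folds(patient_ids, image_names, labels, n_folds):
#     df = pd.DataFrame({"patient": patient_ids, "image": image_names, "label": labels})
#     folds = assign_folds(df, n_folds)  # hypothetical call signature
#     assert len(folds) == len(df)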
| 24.901408 | 99 | 0.526584 |
f85fde926cda35a9fc74acc2b0acaa097f44bc32
| 456 |
py
|
Python
|
src/apps/notes/models.py
|
mentalnoteapp/backend-django-rest-framework
|
82d95fbe1aeb93b85105bf7ae94a3c13534f72cb
|
[
"MIT"
] | null | null | null |
src/apps/notes/models.py
|
mentalnoteapp/backend-django-rest-framework
|
82d95fbe1aeb93b85105bf7ae94a3c13534f72cb
|
[
"MIT"
] | null | null | null |
src/apps/notes/models.py
|
mentalnoteapp/backend-django-rest-framework
|
82d95fbe1aeb93b85105bf7ae94a3c13534f72cb
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.db import models
from apps.tags.models import Tag
| 26.823529 | 72 | 0.736842 |
f86011337ef051c071ef0fd89e5bf4792bb54439
| 1,116 |
py
|
Python
|
tests/test_main.py
|
dadaloop82/viseron
|
1c6c446a4856e16c0e2ed6b9323d169fbdcae20f
|
[
"MIT"
] | 399 |
2020-08-31T21:13:07.000Z
|
2022-03-31T18:54:26.000Z
|
tests/test_main.py
|
dadaloop82/viseron
|
1c6c446a4856e16c0e2ed6b9323d169fbdcae20f
|
[
"MIT"
] | 157 |
2020-09-01T18:59:56.000Z
|
2022-03-25T07:14:19.000Z
|
tests/test_main.py
|
dadaloop82/viseron
|
1c6c446a4856e16c0e2ed6b9323d169fbdcae20f
|
[
"MIT"
] | 53 |
2020-09-01T07:35:59.000Z
|
2022-03-28T23:21:16.000Z
|
"""Tests for __main__.py."""
# import logging
from unittest.mock import MagicMock, patch
import pytest
import viseron.__main__
def test_init(simple_config, mocked_viseron):
"""Test init."""
viseron.__main__.main()
# viseron.__main__.LOGGER.info("testing")
with patch.object(viseron.__main__, "main", MagicMock()) as mock_main:
with patch.object(viseron.__main__, "__name__", "__main__"):
viseron.__main__.init()
mock_main.assert_called_once()
# class TestMyFormatter:
# """Tests for class MyFormatter."""
# def test_format(self):
# """Test formatter."""
# formatter = viseron.__main__.MyFormatter()
# record = logging.makeLogRecord(
# {
# "name": "test_logger",
# "level": 10,
# "pathname": "test_main.py",
# "msg": "Testing, message repeated 2 times",
# }
# )
# formatter.format(record)
| 27.219512 | 74 | 0.606631 |
f8626522d55b3754f7c28ddbfd44245ded575b28
| 11,950 |
py
|
Python
|
ironicclient/tests/unit/v1/test_allocation.py
|
ljmcgann/python-ironicclient
|
a5485dc29fe551e4cb5feaad52cd93d67b0ab53e
|
[
"Apache-2.0"
] | 41 |
2015-01-29T20:10:48.000Z
|
2022-01-26T10:04:28.000Z
|
ironicclient/tests/unit/v1/test_allocation.py
|
ljmcgann/python-ironicclient
|
a5485dc29fe551e4cb5feaad52cd93d67b0ab53e
|
[
"Apache-2.0"
] | null | null | null |
ironicclient/tests/unit/v1/test_allocation.py
|
ljmcgann/python-ironicclient
|
a5485dc29fe551e4cb5feaad52cd93d67b0ab53e
|
[
"Apache-2.0"
] | 46 |
2015-01-19T17:46:52.000Z
|
2021-12-19T01:22:47.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
import testtools
from ironicclient import exc
from ironicclient.tests.unit import utils
import ironicclient.v1.allocation
ALLOCATION = {'uuid': '11111111-2222-3333-4444-555555555555',
'name': 'Allocation-name',
'owner': None,
'state': 'active',
'node_uuid': '66666666-7777-8888-9999-000000000000',
'last_error': None,
'resource_class': 'baremetal',
'traits': [],
'candidate_nodes': [],
'extra': {}}
ALLOCATION2 = {'uuid': '55555555-4444-3333-2222-111111111111',
'name': 'Allocation2-name',
'owner': 'fake-owner',
'state': 'allocating',
'node_uuid': None,
'last_error': None,
'resource_class': 'baremetal',
'traits': [],
'candidate_nodes': [],
'extra': {}}
CREATE_ALLOCATION = copy.deepcopy(ALLOCATION)
for field in ('state', 'node_uuid', 'last_error'):
del CREATE_ALLOCATION[field]
fake_responses = {
'/v1/allocations':
{
'GET': (
{},
{"allocations": [ALLOCATION, ALLOCATION2]},
),
'POST': (
{},
CREATE_ALLOCATION,
),
},
'/v1/allocations/%s' % ALLOCATION['uuid']:
{
'GET': (
{},
ALLOCATION,
),
'DELETE': (
{},
None,
),
},
'/v1/allocations/?node=%s' % ALLOCATION['node_uuid']:
{
'GET': (
{},
{"allocations": [ALLOCATION]},
),
},
'/v1/allocations/?owner=%s' % ALLOCATION2['owner']:
{
'GET': (
{},
{"allocations": [ALLOCATION2]},
),
},
}
fake_responses_pagination = {
'/v1/allocations':
{
'GET': (
{},
{"allocations": [ALLOCATION],
"next": "http://127.0.0.1:6385/v1/allocations/?limit=1"}
),
},
'/v1/allocations/?limit=1':
{
'GET': (
{},
{"allocations": [ALLOCATION2]}
),
},
'/v1/allocations/?marker=%s' % ALLOCATION['uuid']:
{
'GET': (
{},
{"allocations": [ALLOCATION2]}
),
},
}
fake_responses_sorting = {
'/v1/allocations/?sort_key=updated_at':
{
'GET': (
{},
{"allocations": [ALLOCATION2, ALLOCATION]}
),
},
'/v1/allocations/?sort_dir=desc':
{
'GET': (
{},
{"allocations": [ALLOCATION2, ALLOCATION]}
),
},
}
| 33.194444 | 78 | 0.573138 |
f8629eacf541222ae1970586720f609c2d762f08
| 1,105 |
py
|
Python
|
api/routes/auth.py
|
rit-sse/api
|
4dbd04db98284225510d9ae8249514be80d4706a
|
[
"MIT"
] | 1 |
2015-07-17T19:20:45.000Z
|
2015-07-17T19:20:45.000Z
|
api/routes/auth.py
|
rit-sse/api
|
4dbd04db98284225510d9ae8249514be80d4706a
|
[
"MIT"
] | 33 |
2015-07-18T02:31:51.000Z
|
2015-08-04T02:07:41.000Z
|
api/routes/auth.py
|
rit-sse/api
|
4dbd04db98284225510d9ae8249514be80d4706a
|
[
"MIT"
] | 7 |
2015-07-17T16:29:18.000Z
|
2021-08-31T01:03:53.000Z
|
from flask import session, redirect, url_for
from flask.json import jsonify
from api import app, oauth
from api import models
| 27.625 | 83 | 0.656109 |
f863fdd49bdc9fc91c5a6863a1a6f2c9cb1fed2c
| 418 |
py
|
Python
|
mybatis/column_generator.py
|
xliangwu/com.caveup.machine_learn
|
793131c4767f45d468a813752c07d02f623a7b99
|
[
"Apache-2.0"
] | 1 |
2018-09-19T06:27:14.000Z
|
2018-09-19T06:27:14.000Z
|
mybatis/column_generator.py
|
xliangwu/com.caveup.machine_learn
|
793131c4767f45d468a813752c07d02f623a7b99
|
[
"Apache-2.0"
] | null | null | null |
mybatis/column_generator.py
|
xliangwu/com.caveup.machine_learn
|
793131c4767f45d468a813752c07d02f623a7b99
|
[
"Apache-2.0"
] | null | null | null |
if __name__ == '__main__':
column_generator()
| 34.833333 | 91 | 0.586124 |
f86413e599720995225d5a002a0228bfbc9b7ed7
| 22,250 |
py
|
Python
|
ttslab/voices/afrikaans_default.py
|
jkleczar/ttslab
|
33fe0c3f88c1533816b2602b52e4162760d9c5f0
|
[
"BSD-3-Clause"
] | null | null | null |
ttslab/voices/afrikaans_default.py
|
jkleczar/ttslab
|
33fe0c3f88c1533816b2602b52e4162760d9c5f0
|
[
"BSD-3-Clause"
] | null | null | null |
ttslab/voices/afrikaans_default.py
|
jkleczar/ttslab
|
33fe0c3f88c1533816b2602b52e4162760d9c5f0
|
[
"BSD-3-Clause"
] | 1 |
2019-02-25T10:27:41.000Z
|
2019-02-25T10:27:41.000Z
|
# -*- coding: utf-8 -*-
""" This file contains language-specific implementation for an
Afrikaans voice.
The idea is that this file contains subclassed Voice and Phoneset
implementations. This package ttslab/voices may then also contain
speaker specific implementations e.g. "afrikaans_SPEAKER.py"
"""
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "Daniel van Niekerk"
__email__ = "[email protected]"
import re
from collections import OrderedDict
from .. phoneset import Phoneset
from .. defaultvoice import LwaziHTSVoice, LwaziPromHTSVoice
from .. synthesizer_htsme import SynthesizerHTSME
import ttslab.hts_labels_prom as hts_labels_prom
| 52.352941 | 177 | 0.465573 |
f865843e860d96b7840567719ae0919a197d73ae
| 144,813 |
py
|
Python
|
scripts/Iodide/project_misc.py
|
tsherwen/sparse2spatial
|
6f5240c7641ad7a894476672b78c8184c514bf87
|
[
"MIT"
] | 1 |
2020-01-14T21:40:29.000Z
|
2020-01-14T21:40:29.000Z
|
scripts/Iodide/project_misc.py
|
tsherwen/sparse2spatial
|
6f5240c7641ad7a894476672b78c8184c514bf87
|
[
"MIT"
] | null | null | null |
scripts/Iodide/project_misc.py
|
tsherwen/sparse2spatial
|
6f5240c7641ad7a894476672b78c8184c514bf87
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module contains analysis done for the Ocean iodide (Oi!) project
This includes presentation at conferences etc...
"""
import sys
import datetime
import numpy as np
import pandas as pd
import xarray as xr
import sparse2spatial as s2s
import sparse2spatial.utils as utils
import matplotlib
import matplotlib.pyplot as plt
# import AC_tools (https://github.com/tsherwen/AC_tools.git)
import AC_tools as AC
# Get iodide specific functions
import observations as obs
def main():
"""
Run various misc. scripted tasks linked to the "iodide in the ocean" project
"""
pass
# ---- ----- ----- ----- ----- ----- ----- ----- -----
# ----- ----- Misc (associated iodide project tasks)
# These include getting CTM (GEOS-Chem) output for Anoop/Sawalha/TropMet
# --- Make planeflight files for cruise
# mk_pf_files4Iodide_cruise()
# mk_pf_files4Iodide_cruise(mk_column_output_files=True)
# Test the input files for these cruises?
# test_input_files4Iodide_cruise_with_plots()
# Test output files for cruises
# TEST_iodide_cruise_output()
# TEST_AND_PROCESS_iodide_cruise_output()
# TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False)
# Get numbers for data paper (data descriptor paper)
# get_numbers_for_data_paper()
# Get Longhurst province labelled NetCDF for res
# add_LonghurstProvince2NetCDF(res='4x5', ExStr='TEST_VI' )
# add_LonghurstProvince2NetCDF(res='2x2.5', ExStr='TEST_V' )
# add_LonghurstProvince2NetCDF(res='0.125x0.125', ExStr='TEST_VIII' )
# Add Longhurst Province to a lower res NetCDF file
# folder = './'
# filename = 'Oi_prj_output_iodide_field_1x1_deg_0_5_centre.nc'
# filename = 'Oi_prj_output_iodide_field_0_5x0_5_deg_centre.nc'
# ds = xr.open_dataset(folder+filename)
# add_LonghurstProvince2NetCDF(ds=ds, res='0.5x0.5', ExStr='TEST_VIII')
    # process this to csv files for the Indian sea-surface paper
# ---------------------------------------------------------------------------
# ---------- Functions to produce output for Iodide obs. paper -------------
# ---------------------------------------------------------------------------
def get_PDF_of_iodide_exploring_data_rootset(show_plot=False,
ext_str=None):
""" Get PDF of plots exploring the iodide dataset """
import seaborn as sns
sns.set(color_codes=True)
# Get the data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
#
if ext_str == 'Open_ocean':
# Kludge data
# Kludge_tinel_data=True
# if Kludge_tinel_data:
# new_Data = [ 'He_2014', 'He_2013']
# new_Data += ['Chance_2018_'+i for i in 'I', 'II', 'III']
# df.loc[ df['Data_Key'].isin(new_Data), 'Coastal'] = False
# only take data flagged open ocean
df = df.loc[df[u'Coastal'] == 0.0, :]
elif ext_str == 'Coastal':
df = df.loc[df[u'Coastal'] == 1.0, :]
elif ext_str == 'all':
print('Using entire dataset')
else:
print('Need to set region of data to explore - currently', ext_str)
sys.exit()
# setup PDF
savetitle = 'Oi_prj_data_root_exploration_{}'.format(ext_str)
dpi = 320
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
# current_palette = sns.color_palette()
current_palette = sns.color_palette("colorblind")
# --- --- --- --- --- --- --- ---
    # ---- Add in extra variables
    # iodide / iodate ratio
    I_div_IO3_var = 'I$^{-}$/IO$_{3}^{-}$ (ratio)'
    df[I_div_IO3_var] = df['Iodide'] / df['Iodate']
    # total inorganic iodine (iodide + iodate)
I_plus_IO3 = 'I$^{-}$+IO$_{3}^{-}$'
df[I_plus_IO3] = df['Iodide'] + df['Iodate']
# --- Add ocean basin to dataframe
area_var = 'Region'
df[area_var] = None
# setup a dummy column
# --- --- --- --- --- --- --- ---
# --- Plot dataset locations
sns.reset_orig()
# Get lats, lons and size of dataset
lats = df['Latitude'].values
lons = df['Longitude'].values
N_size = df.shape[0]
if ext_str == 'Open_ocean':
title = 'Iodide data (Open Ocean) explored in PDF (N={})'
else:
title = 'Iodide data (all) explored in this PDF (N={})'
# plot up
AC.plot_lons_lats_spatial_on_map(lats=lats, lons=lons,
title=title.format(N_size),
split_title_if_too_long=False,
f_size=10)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
    # --- iodide to iodate ratio
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# plot up with no limits
df.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
plt.title(I_div_IO3_var + ' ({}, y axis unlimited)'.format(ext_str))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
ylimits = 1.5, 0.75, 0.5,
for ylimit in ylimits:
df.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
title = ' ({}, y axis limit: {})'.format(ext_str, ylimit)
plt.title(I_div_IO3_var + title)
plt.ylim(-0.05, ylimit)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
# TODO - update to use proper definitions
# for southern ocean use the files below
# for rest https://www.nodc.noaa.gov/woce/woce_v3/wocedata_1/woce-uot/summary/bound.htm
#
    # --- iodide to iodate ratio ( split by region )
    # Between 120E and -80E it's the Pacific
upper_val = 120
lower_val = -80
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).any(axis=1))
varname = 'Pacific Ocean ({} to {}{})'.format(upper_val, lower_val, unit)
df.loc[bool, area_var] = varname
    # Between -80E and 30E it's the Atlantic
upper_val = -80
lower_val = 30
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).all(axis=1))
varname = 'Atlantic Ocean ({} to {}{})'.format(lower_val, upper_val, unit)
df.loc[bool, area_var] = varname
    # Between 30E and 120E it's the Indian
upper_val = 30
lower_val = 120
unit = '$^{o}$E'
bool_1 = df[u'Longitude'] >= upper_val
bool_2 = df[u'Longitude'] < lower_val
bool = (np.column_stack((bool_2, bool_1)).all(axis=1))
varname = 'Indian Ocean ({} to {}{})'.format(lower_val, upper_val, unit)
df.loc[bool, area_var] = varname
# if latitude below 60S, overwrite to be Southern ocean
varname = 'Southern Ocean'
df.loc[df['Latitude'] < -60, area_var] = varname
# --- --- --- --- --- --- --- ---
# --- locations of data
sns.reset_orig()
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# locations ?
lons = df_tmp[u'Longitude'].tolist()
lats = df_tmp[u'Latitude'].tolist()
#Now plot
AC.plot_lons_lats_spatial_on_map(lons=lons, lats=lats)
# fig=fig, ax=ax , color='blue', label=label, alpha=alpha,
# window=window, axis_titles=axis_titles, return_axis=True,
# p_size=p_size)
plt.title('{} ({})'.format(var_, ext_str))
if show_plot:
plt.show()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- --- --- --- --- --- --- ---
    # --- iodide to iodate ratio
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# plot up with no limits
df_tmp.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
plt.title(I_div_IO3_var + ' ({}, y axis unlimited)'.format(var_))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
ylimits = 1.5, 0.75, 0.5
for ylimit in ylimits:
df_tmp.plot(kind='scatter', y=I_div_IO3_var, x='Latitude')
# beautify
title = ' ({}, y axis limit: {})'.format(var_, ylimit)
plt.title(I_div_IO3_var + title)
plt.ylim(-0.05, ylimit)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- --- --- --- --- --- --- ---
    # --- iodide + iodate
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# loop regions
for var_ in list(set(df[area_var].tolist())):
# select data for area
df_tmp = df[df[area_var] == var_]
# plot up with no limits
df_tmp.plot(kind='scatter', y=I_plus_IO3, x='Latitude')
# beautify
plt.title(I_plus_IO3 + ' ({}, y axis unlimited)'.format(var_))
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# plot up with limits at 3
# ylimits = 1.5, 0.75, 0.5
# for ylimit in ylimits:
# df.plot(kind='scatter', y=I_plus_IO3, x='Latitude' )
# # beautify
# title= ' ({}, y axis limited to {})'.format(var_, ylimit)
# plt.title( I_plus_IO3 + title )
# plt.ylim(-0.05, ylimit )
# # Save to PDF and close plot
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# if show_plot: plt.show()
# plt.close()
# plot up with limits on y
ylimits = [100, 600]
# for ylimit in ylimits:
df_tmp.plot(kind='scatter', y=I_plus_IO3, x='Latitude')
# beautify
title = ' ({}, y axis={}-{})'.format(var_, ylimits[0], ylimits[1])
plt.title(I_plus_IO3 + title)
plt.ylim(ylimits[0], ylimits[1])
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
# ---------------------------------------------------------------------------
# ---------- Funcs. to process iodine obs/external data --------------------
# ---------------------------------------------------------------------------
def check_points_for_cruises(target='Iodide', verbose=False, debug=False):
"""
Check the cruise points for the new data (Tinel, He, etc...)
"""
# Get the observational data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# And the metadata
metadata_df = obs.get_iodide_obs_metadata()
# Only consider new datasets
new_cruises = metadata_df[metadata_df['In Chance2014?'] == 'N']
df = df[df['Data_Key'].isin(new_cruises['Data_Key'].tolist())]
# Strings to format printing
ptr_str_I = '- '*5 + 'Cruise: {:<20}'
ptr_str_II = '(Source: {:<20}, Location: {:<15}, N: {}, N(Iodide): {})'
# Print by cruise
for data_key in set(df['Data_Key']):
df_m_tmp = metadata_df[metadata_df['Data_Key'] == data_key]
df_tmp = df[df['Data_Key'] == data_key]
# Extract metadata
Cruise = df_m_tmp['Cruise'].values[0]
Source = df_m_tmp['Source'].values[0]
Location = df_m_tmp['Location'].values[0]
#
N = df_tmp.shape[0]
N_I = df_tmp[target].dropna().shape[0]
print(ptr_str_I.format(Cruise))
print(ptr_str_II.format(Source, Location, N, N_I))
# Points for all cruises
N = df.shape[0]
N_I = df[target].dropna().shape[0]
print(ptr_str_I.format('ALL new data'))
print(ptr_str_II.format('', '', N, N_I))
def plot_threshold_plus_SD_spatially(var=None, value=None, std=None, res='4x5',
fillcontinents=True, show_plot=False,
dpi=320, save2png=True,
verbose=True, debug=False):
"""
Plot up the spatial extent of a input variable value + Std. Dev.
"""
# - Local variables
# Get the core input variables
data_root = utils.get_file_locations('data_root')
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
ds = xr.open_dataset(data_root + filename)
# make sure the dataset has units
ds = add_units2ds(ds)
# Use appropriate plotting settings for resolution
if res == '0.125x0.125':
centre = True
else:
centre = False
# Get data
arr = ds[var].mean(dim='time').values
# colour in values above and below threshold (works)
arr[arr >= value] = 1
arr[arr >= value-std] = 0.5
arr[(arr != 1) & (arr != 0.5)] = 0.01
# Get units from dataset
units = ds[var].units
# Plot up
title_str = "'{}' ({}) threshold Value ({}) + \n Standard deviation ({})"
title = title_str.format(var, units, value, std)
if var == 'WOA_TEMP_K':
title += ' (in degC={}, std={})'.format(value-273.15, std)
# Plot using AC_tools
AC.plot_spatial_figure(arr,
# extend=extend,
# fixcb=fixcb, nticks=nticks, \
res=res, show=False, title=title, \
fillcontinents=fillcontinents, centre=centre, units=units,
# f_size=f_size,
no_cb=False)
# Use a tight layout
plt.tight_layout()
# Now save or show
if show_plot:
plt.show()
savetitle = 'Oi_prj_threshold_std_4_var_{}_{}'.format(var, res)
if save2png:
plt.savefig(savetitle+'.png', dpi=dpi)
plt.close()
# ---------------------------------------------------------------------------
# -------------- Reproduction of Chance et al (2014) figures ----------------
# ---------------------------------------------------------------------------
def plot_up_iodide_vs_latitude(show_plot=True):
"""
Reproduce Fig. 3 in Chance et al (2014)
Notes
----
- figure captions:
Variation of sea-surface iodide concentration with latitude for entire
data set (open diamonds) and open ocean data only (filled diamonds).
For clarity, one exceptionally high coastal iodide value (700 nM, 58.25N)
has been omitted.
"""
# - Get data
df = get_core_Chance2014_obs()
# Select data of interest
# ( later add a color selection based on coastal values here? )
vars = ['Iodide', 'Latitude']
print(df)
# and select coastal/open ocean
df_coastal = df[df['Coastal'] == True][vars]
df_open_ocean = df[~(df['Coastal'] == True)][vars]
# - Now plot Obs.
# plot coastal
ax = df_coastal.plot(kind='scatter', x='Latitude', y='Iodide', marker='D',
color='blue', alpha=0.1,
# markerfacecolor="None", **kwds )
)
# plot open ocean
ax = df_open_ocean.plot(kind='scatter', x='Latitude', y='Iodide',
marker='D', color='blue', alpha=0.5, ax=ax,
# markerfacecolor="None", **kwds )
)
# Update aesthetics of plot
plt.ylabel('[Iodide], nM')
plt.xlabel('Latitude, $^{o}$N')
plt.ylim(-5, 500)
plt.xlim(-80, 80)
# save or show?
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_Nitrate(show_plot=True):
"""
    Reproduce Fig. 11 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed ( ) and
climatological ( ) nitrate concentration obtained from the World
Ocean Atlas as described in the text for all data (A) and nitrate
concentrations below 2 mM (B) and above 2 mM (C). Dashed lines in B
and C show the relationships between iodide and nitrate adapted from
Campos et al.41 by Ganzeveld et al.27
"""
# - location of data to plot
df = obs.get_processed_df_obs_mod()
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k') # ,
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations below 2 mM
df_tmp = df[df['Nitrate'] < 2]
df_tmp.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k') # ,
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations above 2 mM
df_tmp = df[df['Nitrate'] > 2]
df_tmp.plot(kind='scatter', x='Nitrate', y='Iodide', marker='D',
color='k'),
plt.ylabel('LN[Iodide], nM')
plt.xlabel('LN[Nitrate], mM')
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_SST(show_plot=True):
"""
    Reproduce Fig. 8 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed sea surface
temperature ( ) and climatological sea surface temperature ( ) values
obtained from the World Ocean Atlas as described in the text.
"""
# - location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Temperature', y='Iodide', marker='D',
color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Sea surface temperature (SST), $^{o}$C')
if show_plot:
plt.show()
plt.close()
def plot_up_ln_iodide_vs_salinity(show_plot=True):
"""
    Reproduce Fig. 8 in Chance et al (2014)
Original caption:
Ln[iodide] concentration plotted against observed salinity ( , ) and
climatological salinity ( ) values obtained from the World Ocean Atlas as
described in the text for: (A) all data; (B) samples with salinity greater
than 30, shown in shaded area in (A). Note samples with salinity less than
30 have been excluded from further analysis and are not shown in Fig. 811.
"""
# - location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Just select non-coastal data
# df = df[ ~(df['Coastal']==True) ]
# take log of iodide
df['Iodide'] = np.log(df['Iodide'].values)
# - Plot up all nitrate concentrations
df.plot(kind='scatter', x='Salinity', y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(-2, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations
df_tmp = df[df['Salinity'] < 30]
df_tmp.plot(kind='scatter', x='Salinity',
y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(-2, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
# - Plot up all nitrate concentrations
df_tmp = df[df['Salinity'] > 30]
df_tmp.plot(kind='scatter', x='Salinity',
y='Iodide', marker='D', color='k')
plt.ylabel('LN[Iodide], nM')
plt.xlabel('Salinity')
plt.xlim(29, AC.myround(max(df['Salinity']), 10, round_up=True))
if show_plot:
plt.show()
plt.close()
def plot_pair_grid(df=None, vars_list=None):
"""
Make a basic pair plot to test the data
"""
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from itertools import cycle
# make a kde plot
    # define colormap to cycle (NOTE: relies on a `make_kde` helper defined
    # elsewhere in the full module; it is not shown in this excerpt)
    make_kde.cmap_cycle = cycle(('Blues_r', 'Greens_r', 'Reds_r', 'Purples_r'))
    # Plot a pair plot of the requested variables
    pg = sns.PairGrid(df, vars=vars_list)
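# Editor's usage sketch (assumed call, not in the original script):
# plot_pair_grid(df=obs.get_processed_df_obs_mod(),
#                vars_list=['Iodide', 'WOA_TEMP_K', 'WOA_Salinity', 'WOA_Nitrate'])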
# ---------------------------------------------------------------------------
# ---------------- New plotting of iodine obs/external data -----------------
# ---------------------------------------------------------------------------
def explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs_PERTURBED(
        dsA=None, res='0.125x0.125', dpi=320):
    """
    Analyse the perturbed gridded ancillary data for the Arctic and Antarctic
"""
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
# Get input variables
if isinstance(dsA, type(None)):
filename = 'Oi_prj_predicted_iodide_{}.nc'.format(res)
# folder = '/shared/earth_home/ts551/labbook/Python_progs/'
folder = '/shared/earth_home/ts551/data/iodide/'
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# ds = xr.open_dataset( filename )
# variables to consider
vars2analyse = list(dsA.data_vars)
# Add LWI to array - NOTE: 1 = water in Nature run LWI files !
# ( The above comment is not correct! why is this written here? )
folderLWI = utils.get_file_locations(
'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/'
filenameLWI = 'ctm.nc'
LWI = xr.open_dataset(folderLWI+filenameLWI)
# updates dates (to be Jan=>Dec)
new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']]
LWI.time.values = new_dates
# Sort by new dates
LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}]
# LWI = AC.get_LWI_map(res=res)[...,0]
dsA['IS_WATER'] = dsA['WOA_TEMP'].copy()
dsA['IS_WATER'].values = (LWI['LWI'] == 0)
# add is land
dsA['IS_LAND'] = dsA['IS_WATER'].copy()
dsA['IS_LAND'].values = (LWI['LWI'] == 1)
# get surface area
s_area = AC.calc_surface_area_in_grid(res=res) # m2 land map
dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time')
dsA['AREA'].values = s_area.T
# - Select data of interest by variable for locations
# setup dicts to store the extracted values
df65N, df65S, dfALL = {}, {}, {}
# - setup booleans for the data
# now loop and extract variablesl
vars2use = [
'WOA_Nitrate',
# 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# setup PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space_PERTURBED'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
for var_ in vars2use:
# select the boolean for if water
IS_WATER = dsA['IS_WATER'].values
if IS_WATER.shape != dsA[var_].shape:
# special case for depth
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for all
ds_tmp = dsA.copy()
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
else:
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.copy()
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
# setup a dictionary of regions to plot from
dfs = {
'>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S),
'Global': pd.DataFrame(dfALL),
}
    # - plot up the PDF distribution of each of the variables.
    datasets = sorted(dfs.keys())
    for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
        # Plot up the perturbations too (left as a stub: `perturb2use` is not
        # defined anywhere in this excerpt, so the loop is commented out)
        # for perturb in perturb2use:
        #     ...
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# -Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs(dsA=None,
res='0.125x0.125',
dpi=320):
"""
Analyse the input data for the Arctic and Antarctic
"""
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
# get input variables
if isinstance(dsA, type(None)):
filename = 'Oi_prj_predicted_iodide_{}.nc'.format(res)
# folder = '/shared/earth_home/ts551/labbook/Python_progs/'
folder = '/shared/earth_home/ts551/data/iodide/'
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# ds = xr.open_dataset( filename )
# variables to consider
vars2analyse = list(dsA.data_vars)
# add LWI to array - NOTE: 1 = water in Nature run LWI files !
# ( The above comment is not correct! why is this written here? )
folderLWI = utils.get_file_locations(
'AC_tools')+'/data/LM/TEMP_NASA_Nature_run/'
filenameLWI = 'ctm.nc'
LWI = xr.open_dataset(folderLWI+filenameLWI)
# updates dates (to be Jan=>Dec)
new_dates = [datetime.datetime(1970, i, 1) for i in LWI['time.month']]
LWI.time.values = new_dates
# Sort by new dates
LWI = LWI.loc[{'time': sorted(LWI.coords['time'].values)}]
# LWI = AC.get_LWI_map(res=res)[...,0]
dsA['IS_WATER'] = dsA['WOA_TEMP'].copy()
dsA['IS_WATER'].values = (LWI['LWI'] == 0)
# add is land
dsA['IS_LAND'] = dsA['IS_WATER'].copy()
dsA['IS_LAND'].values = (LWI['LWI'] == 1)
# get surface area
s_area = AC.calc_surface_area_in_grid(res=res) # m2 land map
dsA['AREA'] = dsA['WOA_TEMP'].mean(dim='time')
dsA['AREA'].values = s_area.T
# - Select data of interest by variable for locations
# setup dicts to store the extracted values
df65N, df65S, dfALL = {}, {}, {}
# - setup booleans for the data
# now loop and extract variablesl
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
for var_ in vars2use:
# select the boolean for if water
IS_WATER = dsA['IS_WATER'].values
if IS_WATER.shape != dsA[var_].shape:
# special case for depth
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for all
ds_tmp = dsA.copy()
arr = np.ma.array(12*[ds_tmp[var_].values])
arr = arr[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
else:
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] >= 65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65N[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.sel(lat=(dsA['lat'] <= -65))
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
df65S[var_] = arr
del ds_tmp
# get value for >= 65
ds_tmp = dsA.copy()
arr = ds_tmp[var_].values[ds_tmp['IS_WATER'].values]
# add to saved arrays
dfALL[var_] = arr
del ds_tmp
# setup a dictionary of regions to plot from
dfs = {
'>=65N': pd.DataFrame(df65N), '>=65S': pd.DataFrame(df65S),
'Global': pd.DataFrame(dfALL),
}
# - Loop regions and plot PDFs of variables of interest
# vars2use = dfs[ dfs.keys()[0] ].columns
# set PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_ancillaries_space'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
datasets = sorted(dfs.keys())
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][vars2use]
# Get sample size
N_ = df.shape[0]
# do a pair plot
g = sns.pairplot(df)
# Add a title
plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_))
# adjust plots
g.fig.subplots_adjust(top=0.925, left=0.085)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up the PDF distribution of each of the variables.
for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up the number of oceanic data points by lat for each lat
# Plot up number of samples for South pole
ds = dsA.sel(lat=(dsA['lat'] <= -65))
var_ = 'WOA_Salinity'
N = {}
for lat in ds['lat'].values:
ds_tmp = ds.sel(lat=lat)
N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1]
N = pd.Series(N)
N.plot()
plt.ylabel('number of gridboxes in predictor array')
plt.xlabel('Latitude $^{\circ}$N')
plt.title('Number of gridboxes for Antarctic (<= -65N)')
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Plot up number of samples for North pole
ds = dsA.sel(lat=(dsA['lat'] >= 65))
var_ = 'WOA_Salinity'
N = {}
for lat in ds['lat'].values:
ds_tmp = ds.sel(lat=lat)
N[lat] = ds_tmp[var_].values[ds_tmp['IS_WATER'].values].shape[-1]
N = pd.Series(N)
N.plot()
plt.ylabel('number of gridboxes in predictor array')
plt.xlabel('Latitude $^{\circ}$N')
plt.title('Number of gridboxes')
plt.title('Number of gridboxes for Arctic (>= 65N)')
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def explore_observational_data_in_Arctic_parameter_space(RFR_dict=None,
plt_up_locs4var_conds=False,
testset='Test set (strat. 20%)',
dpi=320):
"""
Analysis the input observational data for the Arctic and Antarctic
"""
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# - local variables
df = RFR_dict['df']
# Set splits in data to look at
dfs = {}
# All data
dfs['All data'] = df.copy()
# Get all the data above 65 N
dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :]
# Get all the data above 65 N and in the testset
bool_ = dfs['>=65N'][testset] == False
dfs['>=65N (training)'] = dfs['>=65N'].loc[bool_, :]
# Get all the data below 65 S
dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :]
# Get all the data above 65 N and in the testset
bool_ = dfs['<=65S'][testset] == False
dfs['<=65S (training)'] = dfs['<=65S'].loc[bool_, :]
# - variables to explore?
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# - Loop regions and plot pairplots of variables of interest
# set PDF
savetitle = 'Oi_prj_explore_Arctic_Antarctic_obs_space'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# Loop by dataset (region) and plots
datasets = sorted(dfs.keys())
for dataset in datasets:
# select the DataFrame
df = dfs[dataset]
# Get sample size
N_ = df.shape[0]
# do a pair plot
g = sns.pairplot(df[vars2use])
# Add a title
plt.suptitle("Pairplot for '{}' (N={})".format(dataset, N_))
# adjust plots
g.fig.subplots_adjust(top=0.925, left=0.085)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
# Loop by dataset (region) and plots
import seaborn as sns
sns.reset_orig()
datasets = sorted(dfs.keys())
for dataset in datasets:
fig, ax = plt.subplots()
# select the DataFrame
dfA = dfs[dataset]
# Set title
title = "Locations for '{}'".format(dataset)
p_size = 50
alpha = 1
        # plot up non-coastal locations
df = dfA.loc[dfA['Coastal'] == False, :]
color = 'blue'
label = 'Non-coastal (N={})'.format(int(df.shape[0]))
m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['Longitude'].values,
lats=df['Latitude'].values,
label=label, fig=fig, ax=ax, color=color,
return_axis=True)
        # plot up coastal locations
df = dfA.loc[dfA['Coastal'] == True, :]
color = 'green'
label = 'Coastal (N={})'.format(int(df.shape[0]))
lons = df['Longitude'].values
lats = df['Latitude'].values
m.scatter(lons, lats, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
df = RFR_dict['df']
dfs = {}
# All data
dfs['All data'] = df.copy()
# Get all the data above 65 N
dfs['>=65N'] = df.loc[df['Latitude'] >= 65, :]
# Get all the data below 65 S
dfs['<=65S'] = df.loc[df['Latitude'] <= -65, :]
# - variables to explore?
vars2use = [
'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO',
]
# plot up the PDF distribution of each of the variables.
datasets = sorted(dfs.keys())
for var2use in vars2use:
print(var2use)
# set a single axis to use.
fig, ax = plt.subplots()
for dataset in datasets:
# select the DataFrame
df = dfs[dataset][var2use]
# Get sample size
N_ = df.shape[0]
# do a dist plot
label = '{} (N={})'.format(dataset, N_)
sns.distplot(df, ax=ax, label=label)
# Make sure the values are correctly scaled
ax.autoscale()
# Beautify
title_str = "PDF of ancillary input for '{}'"
fig.suptitle(title_str.format(var2use))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Loop regions and plot PDFs of variables of interest
if plt_up_locs4var_conds:
df = RFR_dict['df']
dfs = {}
# Nitrate greater of equal to
var_ = 'Nitrate >=15'
dfs[var_] = df.loc[df['WOA_Nitrate'] >= 15, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=15'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 15, :]
# Nitrate greater of equal to
var_ = 'Nitrate >=10'
dfs[var_] = df.loc[df['WOA_Nitrate'] >= 10, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=10'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 10, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=9'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 9, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=8'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 8, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=7'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 7, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=6'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 6, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=5'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 5, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=4'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 4, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=3'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 3, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=2'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 2, :]
# Nitrate greater of equal to
var_ = 'Nitrate <=1'
dfs[var_] = df.loc[df['WOA_Nitrate'] <= 1, :]
# Loop by dataset (nitrate values) and plots
import seaborn as sns
sns.reset_orig()
datasets = sorted(dfs.keys())
for dataset in datasets:
fig, ax = plt.subplots()
# select the DataFrame
dfA = dfs[dataset]
# Set title
title = "Locations for '{}'".format(dataset)
p_size = 50
alpha = 1
            # plot up non-coastal locations
df = dfA.loc[dfA['Coastal'] == False, :]
color = 'blue'
label = 'Non-coastal (N={})'.format(int(df.shape[0]))
m = AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['Longitude'].values,
lats=df['Latitude'].values,
label=label, fig=fig, ax=ax, color=color,
return_axis=True)
# Plot up coastal locations
df = dfA.loc[dfA['Coastal'] == True, :]
color = 'green'
label = 'Coastal (N={})'.format(int(df.shape[0]))
lons = df['Longitude'].values
lats = df['Latitude'].values
m.scatter(lons, lats, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def Driver2analyse_new_data_vs_existing_data():
"""
Driver to plot up all options for old vs. new analysis plots
"""
regions = 'all', 'coastal', 'noncoastal'
for limit_to_400nM in True, False:
for region in regions:
analyse_new_data_vs_existing_data(region=region,
limit_to_400nM=limit_to_400nM)
def analyse_new_data_vs_existing_data(limit_to_400nM=True, region='all', dpi=320):
"""
Build a set of analysis plots exploring the differences between new and
existing datasets
"""
# - Get obs. data
# Get data (inc. additions) and meta data
df_meta = obs.get_iodide_obs_metadata()
pro_df = obs.get_processed_df_obs_mod()
# - Setup plotting
# misc. shared variables
axlabel = '[I$^{-}_{aq}$] (nM)'
# Setup PDF
savetitle = 'Oi_prj_new_vs_existing_datasets'
if limit_to_400nM:
# Exclude v. high values (N=7 - in final dataset)
pro_df = pro_df.loc[pro_df['Iodide'] < 400.]
savetitle += '_limited_to_400nM'
if region == 'all':
savetitle += '_all'
elif region == 'coastal':
pro_df = pro_df.loc[pro_df['Coastal'] == 1, :]
savetitle += '_{}'.format(region)
elif region == 'noncoastal':
pro_df = pro_df.loc[pro_df['Coastal'] == 0, :]
savetitle += '_{}'.format(region)
else:
sys.exit()
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
# - Plot up new data ( ~timeseries? )
New_datasets = df_meta.loc[df_meta['In Chance2014?'] == 'N'].Data_Key
var2plot = 'Iodide'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# if dates present in DataFrame, update axis
dates4cruise = pd.to_datetime(tmp_df['Date'].values)
if len(set(dates4cruise)) == tmp_df.shape[0]:
tmp_df.index = dates4cruise
xlabel = 'Date'
else:
xlabel = 'Obs #'
tmp_df[var2plot].plot()
ax = plt.gca()
plt.xlabel(xlabel)
plt.ylabel(axlabel)
title_str = "New {} data from '{}' ({})"
plt.title(title_str.format(var2plot.lower(), Cruise, dataset))
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up new data ( PDF of iodide )
var2plot = 'Iodide'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# - Plot up PDF plots for the dataset
# plot whole dataset
obs_arr = pro_df[var2plot].values
ax = sns.distplot(obs_arr, axlabel=axlabel,
color='k', label='Whole dataset')
# plot just new data
ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
color='red', ax=ax)
# force y axis extend to be correct
ax.autoscale()
# Beautify
title = "PDF of '{}' {} data ({}) at obs. locations"
plt.title(title.format(dataset, var2plot, axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up new data ( PDF of salinity )
var2plot = u'WOA_Salinity'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# - Plot up PDF plots for the dataset
# plot whole dataset
obs_arr = pro_df[var2plot].values
ax = sns.distplot(obs_arr, axlabel=axlabel,
color='k', label='Whole dataset')
# plot just new data
ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
color='red', ax=ax)
# force y axis extend to be correct
ax.autoscale()
# Beautify
title = "PDF of '{}' {} data ({}) at obs. locations"
plt.title(title.format(dataset, var2plot, axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up new data ( PDF of temperature )
var2plot = 'WOA_TEMP'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# - Plot up PDF plots for the dataset
# plot whole dataset
obs_arr = pro_df[var2plot].values
ax = sns.distplot(obs_arr, axlabel=axlabel,
color='k', label='Whole dataset')
# plot just new data
ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
color='red', ax=ax)
# force y axis extend to be correct
ax.autoscale()
# Beautify
title = "PDF of '{}' {} data ({}) at obs. locations"
plt.title(title.format(dataset, var2plot, axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# - Plot up new data ( PDF of depth )
var2plot = u'Depth_GEBCO'
for dataset in New_datasets:
# Select new dataset
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# - Plot up PDF plots for the dataset
# plot whole dataset
obs_arr = pro_df[var2plot].values
ax = sns.distplot(obs_arr, axlabel=axlabel,
color='k', label='Whole dataset')
# plot just new data
ax = sns.distplot(tmp_df[var2plot], axlabel=axlabel, label=Cruise,
color='red', ax=ax)
# force y axis extend to be correct
ax.autoscale()
# Beautify
title = "PDF of '{}' {} data ({}) at obs. locations"
plt.title(title.format(dataset, var2plot, axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
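# The per-cruise PDF panels above repeat the same two-step overlay (whole
# dataset in black, then one cruise in red). A minimal, self-contained sketch
# of that pattern is given below for reference; the helper name and arguments
# are illustrative only and are not part of the original code, and it uses
# seaborn's kdeplot rather than the (now deprecated) distplot calls above.
def _sketch_cruise_vs_whole_dataset_PDF(df, cruise_df, var2plot='Iodide',
                                        xlabel='[I$^{-}_{aq}$] (nM)',
                                        cruise_label='cruise'):
    """
    Illustrative sketch: overlay the PDF of one cruise on the whole dataset
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    fig, ax = plt.subplots()
    # Whole dataset in black
    sns.kdeplot(df[var2plot].dropna().values, ax=ax, color='k',
                label='Whole dataset')
    # Single cruise in red
    sns.kdeplot(cruise_df[var2plot].dropna().values, ax=ax, color='red',
                label=cruise_label)
    ax.set_xlabel(xlabel)
    ax.autoscale()
    ax.legend()
    return ax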
def get_diagnostic_plots_analysis4observations(inc_all_extract_vars=False,
include_hexbin_plots=False,
model_name='TEMP+DEPTH+SAL',
show_plot=False, dpi=320):
"""
Produce a PDF of comparisons of observations in dataset inventory
"""
# - Setup plotting
# misc. shared variables
axlabel = '[I$^{-}_{aq}$] (nM)'
# Setup PDF
savetitle = 'Oi_prj_obs_plots'
if inc_all_extract_vars:
savetitle += '_all_extract_vars'
include_hexbin_plots = True
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# colours to use?
import seaborn as sns
# - Get obs. data
# Get data (inc. additions) and meta data
df_meta = obs.get_iodide_obs_metadata()
pro_df = obs.get_processed_df_obs_mod()
LOCAL_model_name = 'RFR({})'.format(model_name)
pro_df[LOCAL_model_name] = get_model_predictions4obs_point(pro_df,
model_name=model_name)
# Exclude v. high values (N=4 in the initial dataset;
# N=7 in the final dataset)
pro_df = pro_df.loc[pro_df['Iodide'] < 400.]
# Add coastal flag to data
coastal_flag = 'coastal_flagged'
pro_df = get_coastal_flag(df=pro_df, coastal_flag=coastal_flag)
non_coastal_df = pro_df.loc[pro_df['coastal_flagged'] == 0]
dfs = {'Open-Ocean': non_coastal_df, 'All': pro_df}
# TODO ... add test dataset in here
# Get the point data for params...
point_ars_dict = {}
for key_ in dfs.keys():
point_ars_dict[key_] = {
'Obs.': dfs[key_]['Iodide'].values,
'MacDonald et al (2014)': dfs[key_]['MacDonald2014_iodide'].values,
'Chance et al (2014)': dfs[key_][u'Chance2014_STTxx2_I'].values,
            'Chance et al (2014) - Multivariate': dfs[key_][
u'Chance2014_Multivariate'
].values,
LOCAL_model_name: dfs[key_][LOCAL_model_name],
}
point_ars_dict = point_ars_dict['Open-Ocean']
parm_name_dict = {
'MacDonald et al (2014)': 'MacDonald2014_iodide',
'Chance et al (2014)': u'Chance2014_STTxx2_I',
        'Chance et al (2014) - Multivariate': u'Chance2014_Multivariate',
LOCAL_model_name: LOCAL_model_name,
}
point_data_names = sorted(point_ars_dict.keys())
point_data_names.pop(point_data_names.index('Obs.'))
param_names = point_data_names
# setup color dictionary
current_palette = sns.color_palette("colorblind")
colour_dict = dict(zip(param_names, current_palette[:len(param_names)]))
colour_dict['Obs.'] = 'K'
# --- Plot up locations of old and new data
import seaborn as sns
sns.reset_orig()
plot_up_data_locations_OLD_and_new(save_plot=False, show_plot=False)
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up all params against coastal data
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
xlabel = 'Obs.'
# just non-coastal
for param_name in sorted(parm_name_dict.keys()):
Y = non_coastal_df[parm_name_dict[param_name]].values
X = non_coastal_df['Iodide'].values
title = 'Regression plot of Open-ocean [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel, param_name)
ax = sns.regplot(x=X, y=Y)
# get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# title=None, add_ODR_trendline2plot=True)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(param_name)
# Adjust X and Y range
max_val = max(max(X), max(Y))
smidgen = max_val * 0.05
plt.xlim(0-smidgen, max_val+smidgen)
plt.ylim(0-smidgen, max_val+smidgen)
# Add 1:1
one2one = np.arange(0, max_val*2)
plt.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75,
label='1:1')
plt.legend()
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up all params against all data
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
xlabel = 'Obs.'
X = point_ars_dict[xlabel]
for param_name in point_data_names:
Y = point_ars_dict[param_name]
title = 'Regression plot of all [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel, param_name)
ax = sns.regplot(x=X, y=Y)
# get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# title=None, add_ODR_trendline2plot=True)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(param_name)
# Adjust X and Y range
max_val = max(max(X), max(Y))
smidgen = max_val * 0.05
plt.xlim(0-smidgen, max_val+smidgen)
plt.ylim(0-smidgen, max_val+smidgen)
# Add 1:1
one2one = np.arange(0, max_val*2)
plt.plot(one2one, one2one, color='k', linestyle='--', alpha=0.75,
label='1:1')
plt.legend()
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# ---- Plot up new data
New_datasets = df_meta.loc[df_meta['In Chance2014?'] == 'N'].Data_Key
var2plot = 'Iodide'
for dataset in New_datasets:
tmp_df = pro_df.loc[pro_df['Data_Key'] == dataset]
Cruise = tmp_df['Cruise'].values[0]
# if dates present in DataFrame, update axis
dates4cruise = pd.to_datetime(tmp_df['Date'].values)
if len(set(dates4cruise)) == tmp_df.shape[0]:
tmp_df.index = dates4cruise
xlabel = 'Date'
else:
xlabel = 'Obs #'
tmp_df[var2plot].plot()
ax = plt.gca()
# ax.axhline(30, color='red', label='Chance et al 2014 coastal divide')
plt.xlabel(xlabel)
plt.ylabel(axlabel)
title_str = "New {} data from '{}' ({})"
plt.title(title_str.format(var2plot.lower(), Cruise, dataset))
# plt.legend()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# Plot up Salinity
# var2plot = 'WOA_Salinity'
# for dataset in New_datasets:
# tmp_df = pro_df.loc[ pro_df['Data_Key'] == dataset ]
# tmp_df[var2plot].plot()
# ax= plt.gca()
# ax.axhline(30, color='red', label='Chance et al 2014 coastal divide')
# plt.xlabel( 'Obs #')
# plt.ylabel( 'PSU' )
# plt.title( '{} during cruise from {}'.format( var2plot, dataset ) )
# plt.legend()
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# plt.close()
# ---- Plot up key comparisons for coastal an non-coastal data
for key_ in sorted(dfs.keys()):
# --- Ln(Iodide) vs. T
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_TEMP'
X = dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Ln(Iodide) vs. 1/T
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_TEMP_K'
X = 1 / dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, '1/'+xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Ln(Iodide) vs. Salinity
ylabel = 'ln(Iodide)'
Y = dfs[key_][ylabel].values
xlabel = 'WOA_Salinity'
X = dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=X, y=Y)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if show_plot:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# ---
if inc_all_extract_vars:
for key_ in sorted(dfs.keys()):
# List extract vraiables
extracted_vars = [
            u'WOA_TEMP', u'WOA_Nitrate', u'WOA_Salinity', u'WOA_Dissolved_O2',
            u'WOA_Phosphate', u'WOA_Silicate', u'Depth_GEBCO', u'SeaWIFs_ChlrA',
            u'WOA_MLDpt', u'WOA_MLDpt_max', u'WOA_MLDpt_sum', u'WOA_MLDpd',
            u'WOA_MLDpd_max', u'WOA_MLDpd_sum', u'WOA_MLDvd', u'WOA_MLDvd_max',
            u'WOA_MLDvd_sum', u'DOC', u'DOCaccum', u'Prod', u'SWrad'
]
# Loop extraced variables and plot
for var_ in extracted_vars:
ylabel = var_
xlabel = 'Iodide'
tmp_df = dfs[key_][[xlabel, ylabel]]
# Kludge to remove '--' from MLD columns
for col in tmp_df.columns:
bool_ = [i == '--' for i in tmp_df[col].values]
tmp_df.loc[bool_, :] = np.NaN
if tmp_df[col].dtype == 'O':
tmp_df[col] = pd.to_numeric(tmp_df[col].values,
errors='coerce')
print(var_, tmp_df.min(), tmp_df.max())
# X = dfs[key_][xlabel].values
# Plot up
ax = sns.regplot(x=xlabel, y=ylabel, data=tmp_df)
# Beautify
title = '{} vs {} ({} data)'.format(ylabel, xlabel, key_)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# --- Plot up Just observations and predicted values from models as PDF
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# plot 1st model...
point_name = 'Obs.'
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name])
# Add MacDonald, Chance...
for point_name in point_data_names:
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name])
# force y axis extend to be correct
ax.autoscale()
# Beautify
plt.title('PDF of predicted iodide ({}) at obs. points'.format(axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up Just observations and predicted values from models as CDF
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# plot 1st model...
point_name = 'Obs.'
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name],
hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
# Add MacDonald, Chance...
for point_name in point_data_names:
arr = point_ars_dict[point_name]
ax = sns.distplot(arr, axlabel=axlabel, label=point_name,
color=colour_dict[point_name],
hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
# force y axis extend to be correct
ax.autoscale()
# Beautify
plt.title('CDF of predicted iodide ({}) at obs. points'.format(axlabel))
plt.legend()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# --- Plot up parameterisations as regression
# import seaborn as sns; sns.set(color_codes=True)
# sns.set_context("paper")
# xlabel = 'Obs.'
# X = point_ars_dict[xlabel]
# for point_name in point_data_names:
# title = 'Regression plot of [I$^{-}_{aq}$] (nM) '
# title = title + '{} vs {} parameterisation'.format(xlabel, point_name )
# Y = point_ars_dict[point_name]
# ax = sns.regplot(x=X, y=Y )
# # get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name, log=False,
# # title=None, add_ODR_trendline2plot=True)
# plt.title(title)
# plt.xlabel(xlabel)
# plt.ylabel(point_name)
# # Save to PDF and close plot
# AC.plot2pdfmulti( pdff, savetitle, dpi=dpi )
# plt.close()
# --- Plot up parameterisations as hexbin plot
if include_hexbin_plots:
xlabel = 'Obs.'
X = point_ars_dict[xlabel]
for point_name in point_data_names:
title = 'Hexbin of [I$^{-}_{aq}$] (nM) \n'
title = title + '{} vs {} parameterisation'.format(xlabel,
point_name)
Y = point_ars_dict[point_name]
get_hexbin_plot(x=X, y=Y, xlabel=None, ylabel=point_name,
log=False, title=title, add_ODR_trendline2plot=True)
# plt.show()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
plt.close()
# -- Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def plot_PDF_iodide_obs_mod(bins=10):
"""
plot up PDF of predicted values vs. observations
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Location of data to plot
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Just select non-coastal data
print(df.shape)
df = df[~(df['Coastal'] == True)]
# df = df[ ~(df['Coastal']==True) ]
# Salinity greater than 30
# df = df[ (df['Salinity'] > 30 ) ]
print(df.shape)
# Plot up data
# MacDonald et al 2014 values
ax = sns.distplot(df['MacDonald2014_iodide'],
label='MacDonald2014_iodide', bins=bins)
# Chance et al 2014 values
ax = sns.distplot(df['Chance2014_STTxx2_I'],
label='Chance2014_STTxx2_I', bins=bins)
# Iodide obs.
ax = sns.distplot(df['Iodide'], label='Iodide, nM', bins=bins)
# Update aesthetics and show plot?
plt.xlim(-50, 400)
plt.legend(loc='upper right')
plt.show()
def plt_predicted_iodide_vs_obs_Q1_Q3(dpi=320, show_plot=False,
limit_to_400nM=False, inc_iodide=False):
"""
Plot predicted iodide on a latitudinal basis
NOTES
- this is the obs.-location equivalent of the plot produced to show
predicted values for all global locations
(Oi_prj_global_predicted_vals_vs_lat)
"""
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# Get data
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# Local variables
# sub select variables of interest.
params2plot = [
'Chance2014_STTxx2_I', 'MacDonald2014_iodide',
]
# Set names to overwrite variables with
rename_titles = {u'Chance2014_STTxx2_I': 'Chance et al. (2014)',
u'MacDonald2014_iodide': 'MacDonald et al. (2014)',
'RFR(Ensemble)': 'RFR(Ensemble)',
'Iodide': 'Obs.',
# u'Chance2014_Multivariate': 'Chance et al. (2014) (Multi)',
}
# filename to save values
filename = 'Oi_prj_global_predicted_vals_vs_lat_only_obs_locs'
# include iodide observations too?
if inc_iodide:
params2plot += ['Iodide']
filename += '_inc_iodide'
CB_color_cycle = AC.get_CB_color_cycle()
color_d = dict(zip(params2plot, CB_color_cycle))
#
if limit_to_400nM:
df = df.loc[df['Iodide'] < 400, :]
filename += '_limited_400nM'
# - Process data
# Add binned mean
# bins = np.arange(-70, 70, 10 )
bins = np.arange(-80, 90, 10)
# groups = df.groupby( np.digitize(df[u'Latitude'], bins) )
groups = df.groupby(pd.cut(df['Latitude'], bins))
# Take means of groups
# groups_avg = groups.mean()
groups_des = groups.describe().unstack()
# - setup plotting
fig, ax = plt.subplots(dpi=dpi)
# - Plot up
X = groups_des['Latitude']['mean'].values # groups_des.index
# X =bins
print(groups_des)
# plot groups
for var_ in params2plot:
# Get quartiles
Q1 = groups_des[var_]['25%'].values
Q3 = groups_des[var_]['75%'].values
# Add median
ax.plot(X, groups_des[var_]['50%'].values,
color=color_d[var_], label=rename_titles[var_])
# add shading for Q1/Q3
ax.fill_between(X, Q1, Q3, alpha=0.2, color=color_d[var_])
# - Plot observations
# Highlight coastal obs
tmp_df = df.loc[df['Coastal'] == True, :]
X = tmp_df['Latitude'].values
Y = tmp_df['Iodide'].values
plt.scatter(X, Y, color='k', marker='D', facecolor='none', s=3,
label='Coastal obs.')
# non-coastal obs
tmp_df = df.loc[df['Coastal'] == False, :]
X = tmp_df['Latitude'].values
Y = tmp_df['Iodide'].values
plt.scatter(X, Y, color='k', marker='D', facecolor='k', s=3,
label='Non-coastal obs.')
# - Beautify
# Add legend
plt.legend()
# Limit plotted y axis extent
plt.ylim(-20, 420)
plt.ylabel('[I$^{-}_{aq}$] (nM)')
plt.xlabel('Latitude ($^{\\rm o}$N)')
plt.savefig(filename, dpi=dpi)
if show_plot:
plt.show()
plt.close()
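# The quartile shading above comes from binning the observations by latitude
# with pd.cut and reading Q1/median/Q3 out of groupby().describe(). That step
# is sketched in isolation below; the helper name and the default 10 degree
# bin width are illustrative assumptions, not part of the original code.
def _sketch_binned_quartiles_by_latitude(df, var='Iodide', bin_width=10):
    """
    Illustrative sketch: per-latitude-bin Q1, median and Q3 for one variable
    """
    import numpy as np
    import pandas as pd
    bins = np.arange(-80, 90, bin_width)
    grouped = df.groupby(pd.cut(df['Latitude'], bins))
    stats = grouped[var].describe()
    # Keep just the quartiles, plus the bin-centre latitude for plotting
    out = stats[['25%', '50%', '75%']].copy()
    out['Latitude (bin centre)'] = [i.mid for i in stats.index]
    return out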
def plot_up_data_locations_OLD_and_new(save_plot=True, show_plot=False,
extension='eps', dpi=720):
"""
Plot up old and new data on map
"""
import seaborn as sns
sns.reset_orig()
# - Setup plot
figsize = (11, 5)
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
p_size = 25
alpha = 0.5
window = True
axis_titles = False
# - Get all observational data
df, md_df = obs.get_iodide_obs()
# Separate into new and old data
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
new_metadata_df = md_df.loc[
md_df['In Chance2014?'] == 'N'
]
new_Data_Keys = new_metadata_df['Data_Key'].values
bool = df['Data_Key'].isin(new_Data_Keys)
# old data
df1 = df.loc[~bool]
# new data
df2 = df.loc[bool]
# --- add existing data
# Get existing data... (Chance et al 2014 )
# folder = utils.get_file_locations('data_root')
# f = 'Iodine_obs_WOA.csv'
# df1 = pd.read_csv(folderf, encoding='utf-8' )
# Select lons and lats
lats1 = df1['Latitude'].values
lons1 = df1['Longitude'].values
# Plot up and return basemap axis
label = 'Chance et al. (2014) (N={})'.format(
df1['Iodide'].dropna().shape[0])
m = AC.plot_lons_lats_spatial_on_map(lons=lons1, lats=lats1,
fig=fig, ax=ax, color='blue', label=label,
alpha=alpha,
window=window, axis_titles=axis_titles,
return_axis=True, p_size=p_size)
# - Add in new data following Chance2014?
# this is ~ 5 samples from the Atlantic (and some from Indian ocean?)
# ... get this at a later date...
# - Add in SOE-9 data
# f = 'Iodine_climatology_ISOE9.xlsx'
# df2 = pd.read_excel(folder'/Liselotte_data/'+f, skiprows=1 )
# Data from SOE-9
lats2 = df2['Latitude'].values
lons2 = df2['Longitude'].values
color = 'red'
label = 'Additional data (N={})'
label = label.format(df2['Iodide'].dropna().shape[0])
m.scatter(lons2, lats2, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label)
# - Save out / show
leg = plt.legend(fancybox=True, loc='upper right')
leg.get_frame().set_alpha(0.95)
if save_plot:
savename = 'Oi_prj_Obs_locations.{}'.format(extension)
plt.savefig(savename, bbox_inches='tight', dpi=dpi)
if show_plot:
plt.show()
def plot_up_data_locations_OLD_and_new_CARTOPY(save_plot=True, show_plot=False,
extension='eps', dpi=720):
"""
Plot up old and new data on map
"""
import seaborn as sns
sns.reset_orig()
# - Setup plot
# figsize = (11, 5)
figsize = (11*2, 5*2)
fig = plt.figure(figsize=figsize, dpi=dpi)
# fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
fig, ax = None, None
p_size = 15
alpha = 0.5
window = True
axis_titles = False
# - Get all observational data
df, md_df = obs.get_iodide_obs()
# Separate into new and old data
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
new_metadata_df = md_df.loc[
md_df['In Chance2014?'] == 'N'
]
new_Data_Keys = new_metadata_df['Data_Key'].values
bool = df['Data_Key'].isin(new_Data_Keys)
# old data
df1 = df.loc[~bool]
# new data
df2 = df.loc[bool]
# --- add existing data
# Get existing data... (Chance et al 2014 )
# folder = utils.get_file_locations('data_root')
# f = 'Iodine_obs_WOA.csv'
# df1 = pd.read_csv(folderf, encoding='utf-8' )
# Select lons and lats
lats1 = df1['Latitude'].values
lons1 = df1['Longitude'].values
# Plot up and return basemap axis
label = 'Chance et al. (2014) (N={})'.format(
df1['Iodide'].dropna().shape[0])
ax = plot_lons_lats_spatial_on_map_CARTOPY(lons=lons1, lats=lats1,
fig=fig, ax=ax, color='blue', label=label,
alpha=alpha, dpi=dpi,
# window=window, axis_titles=axis_titles,
# return_axis=True,
# add_detailed_map=True,
add_background_image=False,
add_gridlines=False,
s=p_size)
# - Add in new data following Chance2014?
# this is ~ 5 samples from the Atlantic (and some from Indian ocean?)
# ... get this at a later date...
# - Add in SOE-9 data
# f = 'Iodine_climatology_ISOE9.xlsx'
# df2 = pd.read_excel(folder'/Liselotte_data/'+f, skiprows=1 )
# Data from SOE-9
lats2 = df2['Latitude'].values
lons2 = df2['Longitude'].values
color = 'red'
label = 'Additional data (N={})'
label = label.format(df2['Iodide'].dropna().shape[0])
ax.scatter(lons2, lats2, edgecolors=color, c=color, marker='o',
s=p_size, alpha=alpha, label=label, zorder=1000)
# - Save out / show
leg = plt.legend(fancybox=True, loc='upper right', prop={'size': 6})
leg.get_frame().set_alpha(0.95)
if save_plot:
savename = 'Oi_prj_Obs_locations.{}'.format(extension)
plt.savefig(savename, bbox_inches='tight', dpi=dpi)
if show_plot:
plt.show()
def map_plot_of_locations_of_obs():
"""
Plot up locations of the observational data as a sanity check
"""
import matplotlib.pyplot as plt
# - Settings
plot_all_as_one_plot = True
show = True
# - Get data
folder = utils.get_file_locations('data_root')
f = 'Iodine_obs_WOA.csv'
df = pd.read_csv(folder+f, encoding='utf-8')
# only consider non-coastal locations
print(df.shape)
# df = df[ df['Coastal'] == 1.0 ] # select coastal locations
# df = df[ df['Coastal'] == 0.0 ] # select non coastal locations
# only consider locations with salinity > 30
df = df[df['Salinity'] > 30.0]  # select locations with salinity > 30
print(df.shape)
# Get coordinate values
all_lats = df['Latitude'].values
all_lons = df['Longitude'].values
# Get sub lists of unique identifiers for datasets
datasets = list(set(df['Data_Key']))
n_datasets = len(datasets)
# - Setup plot
#
f_size = 10
marker = 'o'
p_size = 75
dpi = 600
c_list = AC.color_list(int(n_datasets*1.25))
print(c_list, len(c_list))
# plot up white background
arr = np.zeros((72, 46))
vmin, vmax = 0, 0
# - just plot up all sites to test
if plot_all_as_one_plot:
# Setup a blank basemap plot
fig = plt.figure(figsize=(12, 6), dpi=dpi,
facecolor='w', edgecolor='k')
ax1 = fig.add_subplot(111)
plt, m = AC.map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
f_size=f_size*2,
fixcb=[
vmin, vmax], ax=ax1, no_cb=True, resolution='c',
ylabel=True, xlabel=True)
# Scatter plot of points.
m.scatter(all_lons, all_lats, edgecolors=c_list[1], c=c_list[1],
marker=marker, s=p_size, alpha=1,)
# Save and show?
plt.savefig('Iodide_dataset_locations.png', dpi=dpi, transparent=True)
if show:
plt.show()
else:
chunksize = 5
chunked_list = AC.chunks(datasets, chunksize)
counter = 0
for n_chunk_, chunk_ in enumerate(chunked_list):
# Setup a blank basemap plot
fig = plt.figure(figsize=(12, 6), dpi=dpi, facecolor='w',
edgecolor='k')
ax1 = fig.add_subplot(111)
plt, m = AC.map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
f_size=f_size*2,
fixcb=[vmin, vmax], ax=ax1,
no_cb=True, resolution='c',
ylabel=True, xlabel=True)
# Loop all datasets
for n_dataset_, dataset_ in enumerate(chunk_):
print(n_chunk_, counter, dataset_, c_list[counter])
#
df_sub = df[df['Data_Key'] == dataset_]
lats = df_sub['Latitude'].values
lons = df_sub['Longitude'].values
# Plot up and save.
color = c_list[n_chunk_::chunksize][n_dataset_]
m.scatter(lons, lats, edgecolors=color, c=color,
marker=marker, s=p_size, alpha=.5, label=dataset_)
# add one to counter
counter += 1
plt.legend()
# save chunk...
plt.savefig('Iodide_datasets_{}.png'.format(n_chunk_), dpi=dpi,
transparent=True)
if show:
plt.show()
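# The multi-panel branch above relies on AC.chunks to split the list of
# datasets into groups of five before plotting. A minimal equivalent of that
# chunking step is sketched below for clarity; the helper name is an
# illustrative assumption and the original code should keep using AC.chunks.
def _sketch_chunk_list(seq, chunksize=5):
    """
    Illustrative sketch: split a sequence into consecutive chunks
    """
    return [seq[i:i + chunksize] for i in range(0, len(seq), chunksize)]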
def plot_up_parameterisations(df=None, save2pdf=True, show=False):
"""
Plot up parameterisations
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Consider both Chance and MacDonald parameterisations
params = [i for i in df.columns if ('Mac' in i)]
params += [i for i in df.columns if ('Chance' in i)]
# get details of parameterisations
# filename='Chance_2014_Table2_PROCESSED_17_04_19.csv'
filename = 'Chance_2014_Table2_PROCESSED.csv'
folder = utils.get_file_locations('data_root')
param_df = pd.read_csv(folder+filename)
# only consider non-coastal locations?
print(df.shape)
# df = df[ df['Coastal'] == 1.0 ] # select coastal locations
# df = df[ df['Coastal'] == 0.0 ] # select non coastal locations
# only consider locations with salinity > 30
df = df[df['Salinity'] > 30.0]  # select locations with salinity > 30
print(df.shape)
# df = df[ df['Iodide'] < 300 ]
# Setup pdf
if save2pdf:
dpi = 320
savetitle = 'Chance2014_params_vs_recomputed_params'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Loop parameterisations
# for param in params[:2]: # Only loop two if debugging
for param in params:
# Get meta data for parameter
sub_df = param_df[param_df['TMS ID'] == param]
# Setup a new figure
fig = plt.figure()
# Extract Iodide and param data...
# Take logs of data?
iodide_var = 'Iodide'
try:
print(sub_df['ln(iodide)'].values[0])
if sub_df['ln(iodide)'].values[0] == 'yes':
iodide_var = 'ln(Iodide)'
print('Using log values for ', param)
else:
print('Not using log values for ', param)
except:
print('FAILED to try and use log data for ', param)
X = df[iodide_var].values
# And parameter data?
Y = df[param].values
# Remove nans...
tmp_df = pd.DataFrame(np.array([X, Y]).T, columns=['X', 'Y'])
print(tmp_df.shape)
tmp_df = tmp_df.dropna()
print(tmp_df.shape)
X = tmp_df['X'].values
Y = tmp_df['Y'].values
# PLOT UP as X vs. Y scatter...
title = '{} ({})'.format(param, sub_df['Independent variable'].values)
ax = mk_X_Y_scatter_plot_param_vs_iodide(X=X, Y=Y, title=title,
iodide_var=iodide_var)
# Add Chance2014's R^2 to plot...
try:
R2 = str(sub_df['R2'].values[0])
c = str(sub_df['c'].values[0])
m = str(sub_df['m'].values[0])
eqn = 'y={}x+{}'.format(m, c)
print(R2, c, m, eqn)
alt_text = 'Chance et al (2014) R$^2$'+':{} ({})'.format(R2, eqn)
ax.annotate(alt_text, xy=(0.5, 0.90), textcoords='axes fraction',
fontsize=10)
except:
print('FAILED to get Chance et al values for', param)
# plt.text( 0.75, 0.8, alt_text, ha='center', va='center')
# show/save?
if save2pdf:
# Save out figure
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
del fig
# save entire pdf
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
plt.close("all")
def mk_X_Y_scatter_plot_param_vs_iodide(X=None, Y=None, iodide_var=None,
title=None):
"""
Plot up an X vs. Y plot for a parameterisation of iodide (Y) against observed iodide (X)
"""
import matplotlib.pyplot as plt
import seaborn as sns
# Plot up
plt.scatter(X, Y, marker='+', alpha=0.5)
plt.title(title)
plt.ylabel('Param. [Iodide], nM')
plt.xlabel('Obs. [{}], nM'.format(iodide_var))
# Add a trendline
ax = plt.gca()
AC.Trendline(ax, X=X, Y=Y, color='green')
# Adjust x and y axis limits
round_max_X = AC.myround(max(X), 50, round_up=True)
round_max_Y = AC.myround(max(Y), 50, round_up=True)
if iodide_var == 'ln(Iodide)':
round_max_X = AC.myround(max(X), 5, round_up=True)
round_max_Y = AC.myround(max(Y), 5, round_up=True)
plt.xlim(-(round_max_X/40), round_max_X)
plt.ylim(-(round_max_Y/40), round_max_Y)
# Add an N value to plot
alt_text = '(N={})'.format(len(X))
ax.annotate(alt_text, xy=(0.8, 0.10),
textcoords='axes fraction', fontsize=10)
return ax
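# The annotation above quotes the R^2, slope and intercept reported by
# Chance et al (2014) from the metadata table. For reference, a sketch of how
# equivalent statistics could be recomputed directly from the X/Y arrays is
# given below; it uses scipy.stats.linregress and is illustrative only - the
# fitted line drawn on the plot itself still comes from AC.Trendline.
def _sketch_param_vs_iodide_fit_stats(X, Y):
    """
    Illustrative sketch: slope, intercept, R^2 and RMSE of param (Y) vs obs (X)
    """
    import numpy as np
    from scipy import stats
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    # Restrict to finite pairs
    ok = np.isfinite(X) & np.isfinite(Y)
    res = stats.linregress(X[ok], Y[ok])
    RMSE = np.sqrt(np.mean((Y[ok] - X[ok])**2))
    return {'m': res.slope, 'c': res.intercept, 'R2': res.rvalue**2,
            'RMSE': RMSE, 'N': int(ok.sum())}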
def compare_obs_ancillaries_with_extracted_values_WINDOW(dpi=320, df=None):
"""
Plot up a window plot of the observed vs. climatological ancillaries
"""
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
sns.set_style("darkgrid")
sns.set_context("paper", font_scale=0.75)
# Get the observational data
if isinstance(df, type(None)):
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# - Map observational variables to their shared extracted variables
all_vars = df.columns.tolist()
# Dictionary
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There are also 'Nitrite' and 'Ammonium'
}
# units dict?
units_dict = {
'SeaWIFs_ChlrA': "mg m$^{-3}$", # Chance et al uses micro g/L
'WOA_Salinity': 'PSU', # https://en.wikipedia.org/wiki/Salinity
'WOA_Nitrate': "$\mu$M",
'WOA_TEMP': '$^{o}$C',
}
# Colors to use
CB_color_cycle = AC.get_CB_color_cycle()
# set the order the dict keys are accessed
vars_sorted = list(sorted(obs_var_dict.keys()))[::-1]
# setup plot
fig = plt.figure(dpi=dpi, figsize=(5, 7.35))
# - 1st plot Salinity ( all and >30 PSU )
# - All salinity data
var2plot = 'WOA_Salinity'
plot_n = 1
color = CB_color_cycle[0]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# Plot up the data as a scatter
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# Label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# Title the plots
title = 'Salinity (all, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# Add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# Add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# - Salinity >= 30 PSU only
var2plot = 'WOA_Salinity'
plot_n = 2
color = CB_color_cycle[0]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
# Select only data with salinity greater than 30 PSU
df_tmp = df_tmp.loc[df_tmp[obs_var_dict[var2plot]] >= 30, :]
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# title the plots
title = 'Salinity ($\geq$30, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 1, round_up=True) * 1.05
ax_min = 29
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# --- Loop and plot
for n_var2plot, var2plot in enumerate(['WOA_TEMP', 'WOA_Nitrate', ]):
plot_n = 2 + 1 + n_var2plot
color = CB_color_cycle[plot_n]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
# title the plots
title = '{} ({})'.format(obs_var_dict[var2plot], units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# Add a line for orthogonal distance regression (ODR)
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# --- Plot ChlrA ( all and <= 5 mg m^-3 )
# - All ChlrA data
var2plot = 'SeaWIFs_ChlrA'
plot_n = 5
color = CB_color_cycle[5]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
ax.set_xlabel('Observed')
# title the plots
title = 'ChlrA (all, {})'.format(units_dict[var2plot])
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 5, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# - ChlrA <= 5 mg m^-3 only
var2plot = 'SeaWIFs_ChlrA'
plot_n = 6
color = CB_color_cycle[5]
# Make a new axis
ax = fig.add_subplot(3, 2, plot_n, aspect='equal')
# Get the data
df_tmp = df[[obs_var_dict[var2plot], var2plot]].dropna()
# Select only data with ChlrA <= 5 mg m^-3
df_tmp = df_tmp.loc[df_tmp[obs_var_dict[var2plot]] <= 5, :]
N_ = int(df_tmp[[var2plot]].shape[0])
MSE_ = np.mean((df_tmp[obs_var_dict[var2plot]] - df_tmp[var2plot])**2)
RMSE_ = np.sqrt(MSE_)
print(N_, MSE_, RMSE_)
X = df_tmp[obs_var_dict[var2plot]].values
Y = df_tmp[var2plot].values
# plot up
ax.scatter(X, Y, edgecolors=color, facecolors='none', s=5)
# label Y axis
if plot_n in np.arange(1, 6)[::2]:
ax.set_ylabel('Extracted')
ax.set_xlabel('Observed')
# title the plots
units = units_dict[var2plot]
title = 'ChlrA ($\leq$5 {})'.format(units)
ax.text(0.5, 1.05, title, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
# Add N value
stats_str = 'N={} \nRMSE={:.3g}'.format(N_, RMSE_)
ax.text(0.05, 0.9, stats_str, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
# add a 1:1 line
ax_max = df_tmp.max().max()
ax_max = AC.myround(ax_max, 1, round_up=True) * 1.05
ax_min = df_tmp.min().min()
ax_min = ax_min - (ax_max*0.05)
x_121 = np.arange(ax_min, ax_max*1.5)
ax.plot(x_121, x_121, alpha=0.5, color='k', ls='--')
# add ODR line
xvalues, Y_ODR = AC.get_linear_ODR(x=X, y=Y, xvalues=x_121,
return_model=False, maxit=10000)
ax.plot(xvalues, Y_ODR, color=color, ls='--')
# Force axis extents
ax.set_aspect('equal')
ax.set_xlim(ax_min, ax_max)
ax.set_ylim(ax_min, ax_max)
ax.set_aspect('equal')
# -- adjust figure and save
# Adjust plot
left = 0.075
right = 0.975
wspace = 0.05
hspace = 0.175
top = 0.95
bottom = 0.075
fig.subplots_adjust(left=left, bottom=bottom, right=right, top=top,
wspace=wspace, hspace=hspace)
# Save
filename = 'Oi_prj_Chance2014_Obs_params_vs_NEW_extracted_params_WINDOW'
plt.savefig(filename, dpi=dpi)
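# Every panel above follows the same recipe: drop NaNs, compute the RMSE,
# scatter extracted against observed values, then overlay a 1:1 line and an
# orthogonal distance regression (ODR) line. The sketch below shows that
# recipe factored into one helper, using scipy.odr in place of
# AC.get_linear_ODR; it is an illustration of the panel logic rather than a
# drop-in replacement for the code above.
def _sketch_obs_vs_extracted_panel(ax, obs_vals, extracted_vals, color='blue'):
    """
    Illustrative sketch: scatter extracted vs observed with 1:1 and ODR lines
    """
    import numpy as np
    from scipy import odr
    x = np.asarray(obs_vals, dtype=float)
    y = np.asarray(extracted_vals, dtype=float)
    ok = np.isfinite(x) & np.isfinite(y)
    x, y = x[ok], y[ok]
    RMSE = np.sqrt(np.mean((x - y)**2))
    # Scatter the raw points
    ax.scatter(x, y, edgecolors=color, facecolors='none', s=5)
    # 1:1 line over the joint data range
    lims = [min(x.min(), y.min()), max(x.max(), y.max())]
    ax.plot(lims, lims, alpha=0.5, color='k', ls='--')
    # Orthogonal distance regression line
    model = odr.Model(lambda B, xx: B[0] * xx + B[1])
    fit = odr.ODR(odr.RealData(x, y), model, beta0=[1., 0.]).run()
    ax.plot(lims, fit.beta[0] * np.array(lims) + fit.beta[1],
            color=color, ls='--')
    # Report sample size and RMSE in the corner, as in the panels above
    ax.text(0.05, 0.9, 'N={} \nRMSE={:.3g}'.format(len(x), RMSE),
            transform=ax.transAxes)
    return ax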
def compare_obs_ancillaries_with_extracted_values(df=None, save2pdf=True,
show=False, dpi=320):
"""
Some species in the dataframe have observed as well as climatology values.
For these species, plot up X/Y and latitudinal comparisons
"""
import seaborn as sns
sns.set(color_codes=True)
current_palette = sns.color_palette("colorblind")
sns.set_style("darkgrid")
# Get the observational data
if isinstance(df, type(None)):
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# - Map observational variables to their shared extracted variables
all_vars = df.columns.tolist()
# Dictionary
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There are also 'Nitrite' and 'Ammonium'
}
# Dict of units for variables
units_dict = {
'SeaWIFs_ChlrA': "mg m$^{-3}$", # Chance et al uses micro g/L
'WOA_Salinity': 'PSU', # https://en.wikipedia.org/wiki/Salinity
'WOA_Nitrate': "$\mu$M",
'WOA_TEMP': '$^{o}$C',
}
# sort dataframe by latitude
# df = df.sort_values('Latitude', axis=0, ascending=True)
# set the order the dict keys are accessed
vars_sorted = list(sorted(obs_var_dict.keys()))[::-1]
# Setup pdf
if save2pdf:
savetitle = 'Oi_prj_Chance2014_Obs_params_vs_NEW_extracted_params'
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Get variables and confirm which datasets are being used for plot
dfs = {}
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# drop nans...
index2use = df[[obs_var_dict[key_], key_]].dropna().index
dfs[key_] = df.loc[index2use, :]
# Check which datasets are being used
ptr_str = 'For variable: {} (#={})- using: {} \n'
for key_ in vars_sorted:
datasets = list(set(dfs[key_]['Data_Key']))
dataset_str = ', '.join(datasets)
print(ptr_str.format(key_, len(datasets), dataset_str))
# - Loop variables and plot as a scatter plot...
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# new figure
fig = plt.figure()
# drop nans...
df_tmp = df[[obs_var_dict[key_], key_]].dropna()
N_ = int(df_tmp[[key_]].shape[0])
print(N_)
# Plot up
sns.regplot(x=obs_var_dict[key_], y=key_, data=df_tmp)
# Add title
plt.title('X-Y plot of {} (N={})'.format(obs_var_dict[key_], N_))
plt.ylabel('Extracted ({}, {})'.format(key_, units_dict[key_]))
plt.xlabel('Obs. ({}, {})'.format(
obs_var_dict[key_], units_dict[key_]))
# Save out figure &/or show?
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
plt.close()
# - Loop variables and plot verus lat (with difference)
for key_ in vars_sorted:
print(obs_var_dict[key_], key_)
# New figure
fig = plt.figure()
# Drop nans...
df_tmp = df[[obs_var_dict[key_], key_, 'Latitude']].dropna()
N_ = int(df_tmp[[key_]].shape[0])
print(N_)
# Get data to analyse
obs_vals = df_tmp[obs_var_dict[key_]].values
climate = df_tmp[key_].values
X = df_tmp['Latitude'].values
# Plot up
plt.scatter(X, obs_vals, label=obs_var_dict[key_], color='red',
            marker="o")
plt.scatter(X, climate, label=key_, color='blue',
            marker="o")
plt.scatter(X, climate-obs_vals, label='diff', color='green',
            marker="o")
# Aesthetics of plot
plt.legend()
plt.xlim(-90, 90)
plt.ylabel('{} ({})'.format(obs_var_dict[key_], units_dict[key_]))
plt.xlabel('Latitude ($^{o}$N)')
plt.title('{} (N={}) vs. latitude'.format(obs_var_dict[key_], N_))
# Save out figure &/or show?
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show:
plt.show()
plt.close()
# Save entire pdf
if save2pdf:
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def plot_up_lat_STT_var(restrict_data_max=True, restrict_min_salinity=True):
"""
Plot up a "pretty" plot of STT vs Lat, with scatter sizes and color by var.
"""
# - Get data as a DataFrame
df = obs.get_processed_df_obs_mod()
if restrict_data_max:
# df = df[ df['Iodide']< 450. ]
df = df[df['Iodide'] < 400.] # Updated to use 400 nM as upper value
if restrict_min_salinity:
df = df[df['WOA_Salinity'] > 30.]
# Add modulus
df["Latitude (Modulus)"] = np.sqrt(df["Latitude"].copy()**2)
# - Local vars
X_varname = "Latitude (Modulus)"
Y_varname = "WOA_TEMP"
S_varname = 'Iodide'
S_label = S_varname
C_varname = S_varname
# - plot
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind="scatter", x=X_varname, y=Y_varname, alpha=0.4,
s=df[S_varname], label=S_label, figsize=(10, 7),
c=S_varname, cmap=plt.get_cmap("jet"), colorbar=True,
sharex=False, ax=ax, fig=fig)
plt.show()
def plot_up_lat_varI_varII(restrict_data_max=True, restrict_min_salinity=True):
"""
Plot up a "pretty" plot of STT vs Lat, with scatter sizes and color by var.
"""
# - Get data as a DataFrame
df = obs.get_processed_df_obs_mod()
if restrict_data_max:
# df = df[ df['Iodide']< 450. ]
df = df[df['Iodide'] < 400.] # Updated to use 400 nM as upper value
if restrict_min_salinity:
df = df[df['WOA_Salinity'] > 30.]
df["Latitude (Modulus)"] = np.sqrt(df["Latitude"].copy()**2)
# - Local variables
# Variables to plot (edit here to override)
varI = 'Iodide'
varII = "WOA_TEMP"
# name local vars
X_varname = "Latitude (Modulus)"
Y_varname = varI
S_varname = varII
S_label = S_varname
C_varname = S_varname
# - plot up
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind="scatter", x=X_varname, y=Y_varname, alpha=0.4,
s=df[S_varname], label=S_label, figsize=(10, 7),
c=S_varname, cmap=plt.get_cmap("jet"), colorbar=True,
sharex=False, ax=ax, fig=fig)
plt.ylim(-5, 500)
plt.show()
def plot_chance_param(df=None, X_var='Temperature', Y_var='Iodide',
data_str='(Obs.) data'):
"""
Plot up chance et al (2014) param vs. data in DataFrame
"""
# Only include finite data points for temp
# (NOTE: this cuts the dataset to roughly 1/3 of the obs. data)
df = df[np.isfinite(df[X_var])]
# Add a variable for the temperature-squared (T**2) fit
Xvar2plot = X_var+'($^{2}$)'
df[Xvar2plot] = df[X_var].loc[:].values**2
# Plot up data and param.
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
# Plot up
df.plot(kind='scatter', x=Xvar2plot, y=Y_var, ax=ax)
# Add a line of best fit reported param.
actual_data = df[Xvar2plot].values
test_data = np.linspace(AC.myround(actual_data.min()),
AC.myround(actual_data.max()), 20)
m = 0.225
c = 19.0
plt.plot(test_data, ((test_data*m)+c), color='green', ls='--',
label='Chance et al (2014) param.')
# Limit axis to data
plt.xlim(-50, AC.myround(df[Xvar2plot].values.max(), 1000))
plt.ylim(-20, AC.myround(df[Y_var].values.max(), 50, round_up=True))
# Add title and axis labels
N = actual_data.shape[0]
title = 'Linear param vs. {} (N={})'.format(data_str, N)
plt.title(title)
plt.xlabel(X_var + ' ($^{o}$C$^{2}$)')
plt.ylabel(Y_var + ' (nM)')
plt.legend(loc='upper left')
# And show/save
tmp_str = data_str.replace(" ", '_').replace("(", "_").replace(")", "_")
savetitle = 'Chance_param_vs_{}.png'.format(tmp_str)
plt.savefig(savetitle)
plt.show()
def plot_macdonald_param(df=None, X_var='Temperature', Y_var='Iodide',
data_str='(Obs.) data'):
"""
Plot up MacDonald et al (2014) param vs. data in DataFrame
"""
# Only include finite data points for temp
# (NOTE: this cuts the dataset to roughly 1/3 of the obs. data)
df = df[np.isfinite(df[X_var])]
# Add a variable for 1/T (temperature in Kelvin)
Xvar2plot = '1/'+X_var
df[Xvar2plot] = 1. / (df[X_var].loc[:].values+273.15)
Y_var2plot = 'ln({})'.format(Y_var)
df[Y_var2plot] = np.log(df[Y_var].values)
# Plot up data and param.
fig, ax = plt.subplots(facecolor='w', edgecolor='w')
df.plot(kind='scatter', x=Xvar2plot, y=Y_var2plot, ax=ax)
# Add a line of best fit reported param.
# (run some numbers through this equation... )
actual_data = df[X_var].values + 273.15
test_data = np.linspace(actual_data.min(), actual_data.max(), 20)
test_data_Y = 1.46E6*(np.exp((-9134./test_data))) * 1E9
plt.plot(1./test_data, np.log(test_data_Y),
color='green', ls='--', label='MacDonald et al (2014) param.')
# Limit axis to data
plt.xlim(df[Xvar2plot].values.min()-0.000025,
df[Xvar2plot].values.max()+0.000025)
plt.ylim(0, 7)
# Add title and axis labels
N = actual_data.shape[0]
title = 'Arrhenius param vs. {} (N={})'.format(data_str, N)
plt.title(title)
plt.xlabel(Xvar2plot + ' (K$^{-1}$)')
plt.ylabel(Y_var2plot + ' (nM)')
plt.legend(loc='lower left')
# And show/save
tmp_str = data_str.replace(" ", '_').replace("(", "_").replace(")", "_")
savetitle = 'MacDonald_parameterisation_vs_{}.png'.format(tmp_str)
plt.savefig(savetitle)
plt.show()
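# Both plotting functions above embed the literature parameterisations
# directly in the plotting calls: iodide = 0.225*T^2 + 19.0 (T in degrees C)
# for the Chance et al (2014) linear fit, and
# iodide = 1.46E6 * exp(-9134/T) * 1E9 (T in Kelvin) for the MacDonald et al
# (2014) Arrhenius fit. The sketch below pulls those same expressions out as
# standalone functions of sea-surface temperature; the function names are
# illustrative and the coefficients are simply those used in the plots above.
def _sketch_chance2014_iodide(temp_degC):
    """
    Illustrative sketch: Chance et al (2014) linear T^2 param (iodide in nM)
    """
    import numpy as np
    temp_degC = np.asarray(temp_degC, dtype=float)
    return 0.225 * temp_degC**2 + 19.0


def _sketch_macdonald2014_iodide(temp_degC):
    """
    Illustrative sketch: MacDonald et al (2014) Arrhenius param (iodide in nM)
    """
    import numpy as np
    temp_K = np.asarray(temp_degC, dtype=float) + 273.15
    return 1.46E6 * np.exp(-9134. / temp_K) * 1E9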
def plot_current_parameterisations():
"""
Plot up a comparison of Chance et al 2014 and MacDonald et al 2014 params.
"""
# - Get obs and processed data
# get raw obs
raw_df = get_core_Chance2014_obs()
# Only consider iodide values above 30 nM
raw_df = raw_df[raw_df['Iodide'] > 30.]
# - get processed obs.
pro_df = obs.get_processed_df_obs_mod()
restrict_data_max, restrict_min_salinity = True, True
if restrict_data_max:
# pro_df = pro_df[ pro_df['Iodide'] < 450. ] # used for July Oi! mtg.
# restrict below 400 (per. com. RJC)
pro_df = pro_df[pro_df['Iodide'] < 400.]
if restrict_min_salinity:
pro_df = pro_df[pro_df['WOA_Salinity'] > 30.]
# - Plots with raw obs.
# Plot up "linear" fit of iodide and temperature. (Chance et al 2014)
# plot up Chance
# plot_chance_param(df=raw_df.copy())
# Plot up "Arrhenius" fit of iodide and temperature. ( MacDonald et al 2014)
plot_macdonald_param(df=raw_df.copy())
# - Plots with extract Vars.
# Plot up "linear" fit of iodide and temperature. (Chance et al 2014)
# plot_chance_param(df=pro_df.copy(), data_str='Extracted data',
# X_var='WOA_TEMP')
# Plot up "Arrhenius" fit of iodide and temperature. ( MacDonald et al 2014)
plot_macdonald_param(df=pro_df.copy(), data_str='Extracted data',
X_var='WOA_TEMP')
# ---------------------------------------------------------------------------
# ---------------- Misc. Support for iodide project ------------------------
# ---------------------------------------------------------------------------
def explore_diferences_for_Skagerak():
"""
Explore how the Skagerak data differs from the dataset as a whole
"""
# - Get the observations and model output
folder = utils.get_file_locations('data_root')
filename = 'Iodine_obs_WOA_v8_5_1_ENSEMBLE_csv__avg_nSkag_nOutliers.csv'
dfA = pd.read_csv(folder+filename, encoding='utf-8')
# - Local variables
diffvar = 'Salinity diff'
ds_str = 'Truesdale_2003_I'
obs_var_dict = {
# Temperature
'WOA_TEMP': 'Temperature',
# Chlorophyll-a
'SeaWIFs_ChlrA': 'Chl-a',
# Nitrate
'WOA_Nitrate': 'Nitrate',
# Salinity
'WOA_Salinity': 'Salinity'
# There are also 'Nitrite' and 'Ammonium'
}
# - Analysis / updates to DataFrames
dfA[diffvar] = dfA['WOA_Salinity'].values - dfA['Salinity'].values
# - Get just the Skagerak dataset
df = dfA.loc[dfA['Data_Key'] == ds_str]
prt_str = 'The general stats on the Skagerak dataset ({}) are: '
print(prt_str.format(ds_str))
# general stats on the iodide numbers
stats = df['Iodide'].describe()
for idx in stats.index.tolist():
vals = stats[stats.index == idx].values[0]
print('{:<10}: {:<10}'.format(idx, vals))
# - stats on the in-situ data
print('\n')
prt_str = 'The stats on the Skagerak ({}) in-situ ancillary obs. are: '
print(prt_str.format(ds_str))
# which in-situ variables are there
vals = df[obs_var_dict.values()].count()
prt_str = "for in-situ variable '{:<15}' there are N={} values"
for idx in vals.index.tolist():
vals2prt = vals[vals.index == idx].values[0]
print(prt_str.format(idx, vals2prt))
def check_numbers4old_chance_and_new_chance():
"""
Do checks on which datasets have changed between versions
"""
# - Get all observational data
NIU, md_df = obs.get_iodide_obs()
folder = '/work/home/ts551/data/iodide/'
filename = 'Iodide_data_above_20m_v8_5_1.csv'
df = pd.read_csv(folder+filename)
df = df[np.isfinite(df['Iodide'])] # remove NaNs
verOrig = 'v8.5.1'
NOrig = df.shape[0]
# Add the is chance flag to the dataset
ChanceStr = 'In Chance2014?'
df[ChanceStr] = None
for ds in list(set(md_df['Data_Key'])):
bool = df['Data_Key'] == ds
IsChance = md_df.loc[md_df['Data_Key'] == ds, ChanceStr].values[0]
df.loc[bool, ChanceStr] = IsChance
# Where are the new iodide data points
newLODds = set(df.loc[df['ErrorFlag'] == 7]['Data_Key'])
prt_str = 'The new datasets from ErrorFlag 7 are in: {}'
print(prt_str.format(' , '.join(newLODds)))
# Versions with a different number of iodide values
filename = 'Iodide_data_above_20m_v8_2.csv'
df2 = pd.read_csv(folder + filename)
df2 = convert_old_Data_Key_names2new(df2) # Use data descriptor names
df2 = df2[np.isfinite(df2['Iodide'])] # remove NaNs
ver = '8.2'
prt_str = 'Version {} of the data - N={} (vs {} N={})'
print(prt_str.format(ver, df2.shape[0], verOrig, NOrig))
# Do analysis by dataset
for ds in list(set(md_df['Data_Key'])):
N0 = df.loc[df['Data_Key'] == ds, :].shape[0]
N1 = df2.loc[df2['Data_Key'] == ds, :].shape[0]
IsChance = list(set(df.loc[df['Data_Key'] == ds, ChanceStr]))[0]
prt_str = "DS: '{}' (Chance2014={}) has changed by {} to {} ({} vs. {})"
if N0 != N1:
print(prt_str.format(ds, IsChance, N0-N1, N0, verOrig, ver))
def get_numbers_for_data_paper():
"""
Get various numbers/analysis for data descriptor paper
"""
# - Get the full iodide sea-surface dataset
filename = 'Iodide_data_above_20m.csv'
folder = utils.get_file_locations('s2s_root')+'/Iodide/inputs/'
df = pd.read_csv(folder + filename, encoding='utf-8')
# Exclude non finite data points.
df = df.loc[np.isfinite(df['Iodide']), :]
# Save the full data set as .csv for use in Data Descriptor paper
cols2use = [
u'Data_Key', u'Data_Key_ID', 'Latitude', u'Longitude',
# u'\xce\xb4Iodide',
'Year',
# u'Month (Orig.)', # This is RAW data, therefore Month is observation one
u'Month',
'Day',
'Iodide', u'Iodide',
'ErrorFlag', 'Method', 'Coastal', u'LocatorFlag',
]
df = df[cols2use]
# Map references to final .csv from metadata
md_df = obs.get_iodide_obs_metadata()
col2use = u'Reference'
Data_keys = set(df['Data_Key'].values)
for Data_key in Data_keys:
# Get ref for dataset from metadata
bool_ = md_df[u'Data_Key'] == Data_key
REF = md_df.loc[bool_, :][col2use].values[0].strip()
# Add to main data array
bool_ = df[u'Data_Key'] == Data_key
df.loc[bool_, col2use] = REF
# Round up the iodide values
df['Iodide'] = df['Iodide'].round(1).values
df[u'Iodide'] = df[u'Iodide'].round(1).values
df[u'Longitude'] = df[u'Longitude'].round(6).values
df[u'Latitude'] = df[u'Latitude'].round(6).values
# Now lock in values by settings to strings.
df[cols2use] = df[cols2use].astype(str)
# save the resultant file out
filename = 'Oi_prj_Iodide_obs_surface4DataDescriptorPaper.csv'
df.to_csv(filename, encoding='utf-8')
# Get number of samples of iodide per dataset
md_df = obs.get_iodide_obs_metadata()
md_df.index = md_df['Data_Key']
s = pd.Series()
Data_Keys = md_df['Data_Key']
for Data_Key in Data_Keys:
df_tmp = df.loc[df['Data_Key'] == Data_Key]
s[Data_Key] = df_tmp.shape[0]
md_df['n'] = s
md_df.index = np.arange(md_df.shape[0])
md_df.to_csv('Oi_prj_metadata_with_n.csv', encoding='utf-8')
# Check sum for assignment?
prt_str = '# Assigned values ({}) should equal original DataFrame size:{}'
print(prt_str.format(md_df['n'].sum(), str(df.shape[0])))
# Get number of samples of iodide per obs. technique
Methods = set(df['Method'])
s_ds = pd.Series()
s_n = pd.Series()
for Method in Methods:
df_tmp = df.loc[df['Method'] == Method]
s_n[Method] = df_tmp.shape[0]
s_ds[Method] = len(set(df_tmp['Data_Key']))
# Combine and save
dfS = pd.DataFrame()
dfS['N'] = s_n
dfS['datasets'] = s_ds
dfS.index.name = 'Method'
# Reset index
index2use = [str(i) for i in sorted(pd.to_numeric(dfS.index))]
dfS = dfS.reindex(index2use)
dfS.to_csv('Oi_prj_num_in_Methods.csv', encoding='utf-8')
# Check sum on assignment of methods
prt_str = '# Assigned methods ({}) should equal original DataFrame size:{}'
print(prt_str.format(dfS['N'].sum(), str(df.shape[0])))
prt_str = '# Assigned datasets ({}) should equal # datasets: {}'
print(prt_str.format(dfS['datasets'].sum(), len(set(df['Data_Key']))))
# Check which methods are assign to each dataset
dfD = pd.DataFrame(index=sorted(set(df['Method'].values)))
S = []
for Data_Key in Data_Keys:
df_tmp = df.loc[df['Data_Key'] == Data_Key]
methods_ = set(df_tmp['Method'].values)
dfD[Data_Key] = pd.Series(dict(zip(methods_, len(methods_)*[True])))
# Do any datasets have more than one method?
print('These datasets have more than one method: ')
print(dfD.sum(axis=0)[dfD.sum(axis=0) > 1])
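# The per-dataset and per-method sample counts above are assembled with
# explicit loops over pd.Series objects. The same numbers can be obtained
# more directly with groupby, as sketched below; this is a design note only
# and the helper name is not part of the original code.
def _sketch_counts_per_method(df):
    """
    Illustrative sketch: N samples and N datasets per measurement Method
    """
    import pandas as pd
    grp = df.groupby('Method')
    out = pd.DataFrame({
        'N': grp['Iodide'].size(),
        'datasets': grp['Data_Key'].nunique(),
    })
    out.index.name = 'Method'
    return out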
def mk_PDF_plot_for_Data_descriptor_paper(dpi=320):
"""
Make a PDF plot for the data descriptor paper
"""
import seaborn as sns
sns.set(color_codes=True)
# Get the data
df = obs.get_processed_df_obs_mod() # NOTE this df contains values >400nM
# df = df.loc[df['Iodide'] <400, : ]
# split data into all, Coastal and Non-Coastal
dfs = {}
dfs['All'] = df.copy()
dfs['Coastal'] = df.loc[df['Coastal'] == 1, :]
dfs['Non-coastal'] = df.loc[df['Coastal'] != 1, :]
# if hist=True, use a count instead of density
hist = False
# Loop and plot
axlabel = '[I$^{-}_{aq}$] (nM)'
fig, ax = plt.subplots()
vars2plot = dfs.keys()
for key in vars2plot:
sns.distplot(dfs[key]['Iodide'].values, ax=ax,
axlabel=axlabel, label=key, hist=hist)
# force y axis extend to be correct
ax.autoscale()
# Add a legend
plt.legend()
# Add a label for the Y axis
plt.ylabel('Density')
# save plot
if hist:
savename = 'Oi_prj_Data_descriptor_PDF'
else:
savename = 'Oi_prj_Data_descriptor_PDF_just_Kernal'
plt.savefig(savename+'.png', dpi=dpi)
def mk_pf_files4Iodide_cruise(dfs=None, test_input_files=False,
mk_column_output_files=False, num_tracers=103):
"""
Make planeflight input files for iodide cruises
"""
# Get locations for cruises as
if isinstance(dfs, type(None)):
dfs = get_iodide_cruise_data_from_Anoop_txt_files()
# Test the input files?
if test_input_files:
test_input_files4Iodide_cruise_with_plots(dfs=dfs)
# Make planeflight files for DataFrames of cruises data (outputting columns values)
if mk_column_output_files:
# slist = ['O3', 'IO', 'BrO', 'CH2O']
slist = ['TRA_002', 'TRA_046', 'TRA_092', 'TRA_020', 'GLYX']
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND'
]
slist = slist + met_vars
for key_ in dfs.keys():
print(key_, dfs[key_].shape)
df = dfs[key_].dropna()
print(df.shape)
# add TYPE flag
df['TYPE'] = 'IDC'
# Grid box level centers [hPa]
alts_HPa = AC.gchemgrid('c_hPa_geos5_r')
# Loop and add in column values
dfs_all = []
for n_alt, hPa_ in enumerate(alts_HPa):
print(hPa_, n_alt)
df_ = df.copy()
df_['PRESS'] = hPa_
dfs_all += [df_]
df = pd.concat(dfs_all)
# make sure rows are in date order
df.sort_values(['datetime', 'PRESS'], ascending=True, inplace=True)
# now output files
AC.prt_PlaneFlight_files(df=df, slist=slist)
# Make planeflight files for DataFrames of cruises data
# (outputting surface values)
else:
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND'
]
assert isinstance(num_tracers, int), 'num_tracers must be an integer'
slist = ['TRA_{:0>3}'.format(i) for i in np.arange(1, num_tracers+1)]
species = ['OH', 'HO2', 'GLYX']
slist = slist + species + met_vars
for key_ in dfs.keys():
print(key_)
df = dfs[key_].dropna()
# add TYPE flag
df['TYPE'] = 'IDS'
#
df['PRESS'] = 1013.0
# now output files
AC.prt_PlaneFlight_files(df=df, slist=slist)
def test_input_files4Iodide_cruise_with_plots(dfs=None, show=False):
""""
Plot up maps of iodide cruise routes
"""
# Get locations for cruises as
if isinstance(dfs, type(None)):
dfs = get_iodide_cruise_data_from_Anoop_txt_files()
# - Test input files
# file to save?
savetitle = 'GC_pf_input_iodide_cruises'
dpi = 320
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
vars2test = ['LON', 'LAT']
for key_ in dfs.keys():
df = dfs[key_]
for var_ in vars2test:
# -- Plot X vs Y plot
df_tmp = df[['datetime', var_]]
# calc NaNs
VAR_dropped_N = int(df_tmp.shape[0])
df_tmp = df_tmp.dropna()
VAR_N_data = int(df_tmp.shape[0])
VAR_dropped_N = VAR_dropped_N-VAR_N_data
# plot
df_tmp.plot(x='datetime', y=var_)
#
title = "Timeseries of '{}' for '{}'".format(var_, key_)
title += ' (ALL N={}, exc. {} NaNs)'.format(VAR_N_data,
VAR_dropped_N)
plt.title(title)
# Save / show
file2save_str = 'Iodide_input_file_{}_check_{}.png'.format(
key_, var_)
plt.savefig(file2save_str)
if show:
plt.show()
print(df_tmp[var_].describe())
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
# -- Plot up cruise track as map
del df_tmp
df_tmp = df.dropna()
lons = df_tmp['LON'].values
lats = df_tmp['LAT'].values
title = "Cruise track for '{}'".format(key_)
print('!'*100, 'plotting map for: ', key_)
AC.plot_lons_lats_spatial_on_map(lons=lons, lats=lats, title=title)
plt.ylim(AC.myround(lats.min()-20, 10, ),
AC.myround(lats.max()+20, 10, round_up=True))
plt.xlim(AC.myround(lons.min()-20, 10, ),
AC.myround(lons.max()+20, 10, round_up=True))
if show:
plt.show()
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def get_iodide_cruise_data_from_Anoop_txt_files(verbose=False):
"""
Get observational data and locations from Anoop's txt files
"""
# - Local variables
folder = utils.get_file_locations('data_root')
folder += 'LOCS_Inamdar_Mahajan_cruise_x3/'
cruise_files = {
# 1 8th Southern Ocean Expedition (SOE-8), possibly on the RV Sagar Nidhi
# 'Iodide1': 'cruise1_2014.xlsx',
'SOE-8': 'cruise1_2014.xlsx',
        # 2 2nd International Indian Ocean Expedition (IIOE-2),
# possibly one of several cruises in this program
# (IIOE-1 was decades ago). On board RV Sagar Nidhi.
# 'Iodide2': 'cruise2_2015.xlsx',
'IIOE-1': 'cruise2_2015.xlsx',
        # 3 9th Southern Ocean Expedition (SOE-9), the cruise on which
        # Liselotte Tinel took samples, on board the RV Agulhas.
# 'Iodide3': 'cruise3_2016.xlsx',
'SOE-9': 'cruise3_2016.xlsx',
}
# - Extract data
dfs = {}
for cruise_name in cruise_files.keys():
print('Extracting: ', cruise_name, cruise_files[cruise_name])
# cruise_name = cruise_files.keys()[0]
df = pd.read_excel(folder+cruise_files[cruise_name])
names_dict = {
'Date': 'date', 'UTC': 'date', 'time (UTC)': 'time', 'lat': 'LAT',
'lon': 'LON'
}
if verbose:
print(df.head())
df.rename(columns=names_dict, inplace=True)
if verbose:
print(df.head())
# convert dates to datetime
# def _convert_datetime(x):
# return (270-atan2(x['date'],x['GMAO_UWND'])*180/pi)%360
# df['datetime'] = df.apply( f, axis=1)
df['datetime'] = df['date'].astype(str)+' '+df['time'].astype(str)
df['datetime'] = pd.to_datetime(df['datetime'])
df.index = df['datetime'].values
if verbose:
print(df.head())
dfs[cruise_name] = df[['datetime', 'LON', 'LAT']]
return dfs
def TEST_AND_PROCESS_iodide_cruise_output(just_process_surface_data=False):
"""
Process, plot (test values), then save planeflight values to csv
"""
# Local variables
wd = '/scratch/ts551/GC/v10-01_HAL/'
files_dict = {
'SOE-8': wd+'run.ClBr.Iodide2015.SOE-8',
'IIOE-1': wd+'run.ClBr.Iodide2016.IIOE-1',
'SOE-9': wd+'run.ClBr.Iodide2017.SOE-9',
}
# Test surface output
if just_process_surface_data:
extra_str = 'surface'
dfs = {}
for key_ in files_dict.keys():
wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str)
df = process_planeflight_files(wd=wd)
dfs[key_] = df
get_test_plots_surface_pf_output(df=df,
name='{} ({})'.format(key_, extra_str))
# Save the output as .csv
for key_ in dfs.keys():
savetitle = 'GC_planeflight_compiled_output_for_{}_{}.csv'
savetitle = savetitle.format(key_, extra_str)
savetitle = AC.rm_spaces_and_chars_from_str(savetitle)
dfs[key_].to_csv(savetitle)
# - Process the output files for column values
else:
specs = ['O3', 'BrO', 'IO', 'CH2O']
extra_str = 'column'
dfs = {}
file_str = 'GC_planeflight_compiled_output_for_{}_{}_II.csv'
for key_ in files_dict.keys():
# for key_ in ['IIOE-1']:
print(key_)
pf_wd = files_dict[key_]+'/plane_flight_logs_{}/'.format(extra_str)
df = process_planeflight_files(wd=pf_wd)
# now process to column values
df = process_planeflight_column_files(wd=files_dict[key_], df=df)
dfs[key_] = df
# Save the output as .csv
savetitle = file_str.format(key_, extra_str)
df['datetime'] = df.index
df.to_csv(AC.rm_spaces_and_chars_from_str(savetitle))
# Test plots?
for key_ in files_dict.keys():
savetitle = file_str.format(key_, extra_str)
df = pd.read_csv(AC.rm_spaces_and_chars_from_str(savetitle))
df.index = pd.to_datetime(df['datetime'])
get_test_plots_surface_pf_output(df=df,
name='{} ({})'.format(
key_, extra_str),
specs=specs, units='molec cm$^{-2}$',
scale=1)
def process_planeflight_column_files(wd=None, df=None, res='4x5', debug=False):
"""
Process column of v/v values into single values for total column
"""
# wd=files_dict[key_]; df = dfs[ key_ ]; res='4x5'
specs = ['O3', u'BrO', u'IO', u'CH2O', u'GLYX']
timestamps = list(sorted(set(df.index)))
timestamps_with_duplicates = []
RMM_air = AC.constants('RMM_air')
AVG = AC.constants('AVG')
specs = ['O3', 'BrO', 'IO', 'CH2O']
# get lon lat array of time in troposphere
TPS = AC.get_GC_output(wd=wd+'/', vars=['TIME_TPS__TIMETROP'],
trop_limit=True)
# convert this to boolean (<1 == not strat)
TPS[TPS != 1] = 9999.9
TPS[TPS == 1] = False
TPS[TPS == 9999.9] = True
# And dates
CTM_DATES = AC.get_gc_datetime(wd=wd+'/')
CTM_months = np.array([i.month for i in CTM_DATES])
# a EPOCH = datetime.datetime(1970,1,1)
# CTM_EPOCH = np.array([ (i.month-EPOCH).total_seconds() for i in CTM_DATES ])
# Also get grid of surface area ( m^2 ) and convert to cm2
S_AREA = AC.get_surface_area(res=res) * 10000
A_M = AC.get_GC_output(wd, vars=['BXHGHT_S__AD'], trop_limit=True,
dtype=np.float64)
# VOL = AC.get_volume_np( wd=wd, res=res, s_area=S_AREA[...,None])
big_data_l = []
dates = []
    # for ts in timestamps[::1000]: # Test processing on every 1000th point only
n_timestamps = len(timestamps)
for n_ts, ts in enumerate(timestamps):
print('progress= {:.3f} %'.format((float(n_ts) / n_timestamps)*100.))
tmp_df = df.loc[df.index == ts]
if debug:
print(ts, tmp_df.shape)
# List of pressures (one set = 47 )
PRESS_ = tmp_df['PRESS'].values
        # special condition for where there is more than one column set
        # for a timestamp
# assert( len(PRESS) == 47 )
if len(PRESS_) != 47:
timestamps_with_duplicates += [ts]
prt_str = 'WARNING: DOUBLE UP IN TIMESTEP:{} ({}, shape={})'
print(prt_str.format(ts, len(PRESS_), tmp_df.shape))
print('Just using 1st 47 values')
tmp_df = tmp_df[0:47]
dates += [ts]
else:
dates += [ts]
# Now reverse data (as outputted from highest to lowest)
tmp_df = tmp_df.loc[::-1]
        # select every other value?
        # lon/lat of the sampling locations
LAT_ = tmp_df['LAT'].values
LON_ = tmp_df['LON'].values
# check there is only one lat and lon
assert len(set(LAT_)) == 1
assert len(set(LON_)) == 1
# - Select 3D vars from ctm.nc file
# get LON, LAT index of box
LON_ind = AC.get_gc_lon(LON_[0], res=res)
LAT_ind = AC.get_gc_lat(LAT_[0], res=res)
# time_ind = AC.find_nearest( CTM_EPOCH, (ts-EPOCH).total_seconds() )
time_ind = AC.find_nearest(CTM_months, ts.month)
        # tropopause height? ('TIME_TPS__TIMETROP')
TPS_ = TPS[LON_ind, LAT_ind, :, time_ind]
# Select surface area of grid box
S_AREA_ = S_AREA[LON_ind, LAT_ind, 0]
        # compute column by spec
A_M_ = A_M[LON_ind, LAT_ind, :, time_ind]
# Number of molecules per grid box
MOLECS_ = (((A_M_*1E3) / RMM_air) * AVG)
# Extract for species
data_l = []
for spec in specs:
# Get species in v/v
data_ = tmp_df[spec].values
# Mask for troposphere
data_ = np.ma.array(data_[:38], mask=TPS_)
# Get number of molecules
data_ = (data_ * MOLECS_).sum()
# Convert to molecs/cm2
data_ = data_ / S_AREA_
# Store data
data_l += [data_]
# Save location
data_l += [LON_[0], LAT_[0]]
# Save data for all specs
big_data_l += [data_l]
# Convert to DataFrame.
df_col = pd.DataFrame(big_data_l)
df_col.index = dates # timestamps[::1000]
df_col.columns = specs + ['LON', 'LAT']
print(df_col.shape)
return df_col
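# Illustrative usage (a run directory and a surface DataFrame are assumed here):
#   df_col = process_planeflight_column_files(wd=run_dir, df=surface_df)
# returns one row per timestamp with tropospheric column densities (molec cm-2)
# for each species, plus the LON/LAT of the sampling point.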
def process_planeflight_files(wd=None):
"""
Process planeflight files to pd.DataFrame
"""
import glob
import seaborn as sns
sns.set_context("paper", font_scale=0.75)
# Get planeflight data
files = glob.glob(wd+'plane.log.*')
print(wd, len(files), files[0])
names, POINTS = AC.get_pf_headers(files[0])
dfs = [AC.pf_csv2pandas(file=i, vars=names) for i in files]
df = pd.concat(dfs)
# Rename axis
TRA_XXs = [i for i in df.columns if ('TRA_' in i)]
TRA_dict = dict(
zip(TRA_XXs, [v10_ClBrI_TRA_XX_2_name(i) for i in TRA_XXs]))
df.rename(columns=TRA_dict, inplace=True)
return df
def get_test_plots_surface_pf_output(wd=None, name='Planeflight',
df=None, specs=None, units=None, scale=1,
show_plot=False):
"""
    Test model output at surface for Indian ship cruises
"""
import seaborn as sns
sns.set(color_codes=True)
# Get data
if isinstance(df, type(None)):
        df = process_planeflight_files(wd=wd)
# Now add summary plots
dpi = 320
savetitle = 'GC_planeflight_summary_plots_for_{}_V'.format(name)
savetitle = AC.rm_spaces_and_chars_from_str(savetitle)
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=True)
# Locations outputted for?
title = 'Locations of {} output'.format(name)
fig, ax = plt.subplots()
AC.plot_lons_lats_spatial_on_map(title=title, f_size=15,
lons=df['LON'].values, lats=df['LAT'].values,
fig=fig, ax=ax)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True)
if show_plot:
plt.show()
# Timeseries of key species
if isinstance(specs, type(None)):
key_spec = ['O3', 'NO', 'NO2', 'OH', 'HO2', 'IO', 'BrO']
extras = ['SO4', 'DMS', 'CH2O', ]
species = ['OH', 'HO2', 'GLYX']
specs = key_spec + extras + species
specs += ['LON', 'LAT']
met = ['GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP',
'GMAO_UWND', 'GMAO_VWND']
specs += met
print(specs)
for spec in specs:
fig, ax = plt.subplots()
if isinstance(units, type(None)):
units, scale = AC.tra_unit(spec, scale=True)
try:
spec_LaTeX = AC.latex_spec_name(spec)
except:
spec_LaTeX = spec
print(spec, units, spec_LaTeX, scale)
dates = pd.to_datetime(df.index).values
plt.plot(dates, df[spec].values*scale)
plt.ylabel('{} ({})'.format(spec, units))
title_str = "Timeseries of modelled '{}' during {}"
plt.title(title_str.format(spec_LaTeX, name))
plt.xticks(rotation=45)
plt.subplots_adjust(bottom=0.15)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=True)
if show_plot:
plt.show()
plt.close()
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=True)
def mk_data_files4Indian_seasurface_paper(res='0.125x0.125'):
"""
Make data files for the indian ocean surface iodide paper
"""
AreasOfInterest = {
'SubT_NA': ('NASW', 'NATR', 'NASE', ),
'SubT_SA': ('SATL',),
'SubT_NP': (u'NPSW', 'NPTG'),
'SubT_SP': ('SPSG',),
'SubT_SI': ('ISSG',),
}
AreasOfInterest_Names = AreasOfInterest.copy()
# Get dictionaries of province numbers and names
num2prov = LonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
MRnum2prov = MarineRegionsOrg_LonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
Rnum2prov = RosieLonghurstProvinceFileNum2Province(
None, invert=True, rtn_dict=True)
# Convert regions to the LP numbers
PrtStr = "{} = Requested province: {} - R's #={}, MIT(GitHub) #={}, LH(2010) #={}"
for key_ in AreasOfInterest.keys():
for a_ in AreasOfInterest[key_]:
print(PrtStr.format(
key_, a_, Rnum2prov[a_], num2prov[a_], MRnum2prov[a_]))
nums = [MRnum2prov[i] for i in AreasOfInterest[key_]]
AreasOfInterest[key_] = nums
# - Get data all together
Filename = 'Oi_prj_predicted_iodide_0.125x0.125_No_Skagerrak_WITH_Provinces.nc'
# folder = '/work/home/ts551/data/iodide/'
folder = './'
ds = xr.open_dataset(folder + Filename)
params = ['Chance2014_STTxx2_I',
'MacDonald2014_iodide', 'Ensemble_Monthly_mean']
vars2use = params + ['LonghurstProvince']
ds = ds[vars2use]
# Also add the features of interest
Filename = 'Oi_prj_feature_variables_0.125x0.125_WITH_Provinces.nc'
ds2 = xr.open_dataset(folder + Filename)
vars2add = ['WOA_MLDpt', 'WOA_Nitrate', 'WOA_TEMP', 'WOA_Salinity']
for var in vars2add:
ds[var] = ds2[var]
# Add axis X/Y assignment
attrs = ds['lat'].attrs
attrs["axis"] = 'Y'
ds['lat'].attrs = attrs
attrs = ds['lon'].attrs
attrs["axis"] = 'X'
ds['lon'].attrs = attrs
# - Now extract the data and check the locations being extracted
# Make files with the data of interest.
file_str = 'Oi_OS_Longhurst_provinces_{}_{}_{}.{}'
for key_ in AreasOfInterest.keys():
nums = AreasOfInterest[key_]
ds_tmp = ds.where(np.isin(ds.LonghurstProvince.values, nums))
# - Plot a diagnostic figure
fig, ax = plt.subplots()
ds_tmp['LonghurstProvince'].mean(dim='time').plot(ax=ax)
        # Get names and numbers of assigned areas
        Names = AreasOfInterest_Names[key_]
        nums = [str(i) for i in AreasOfInterest[key_]]
        # Add a title
title = "For '{}' ({}), \n plotting #(s)={}"
title = title.format(key_, ', '.join(Names), ', '.join(nums))
plt.title(title)
# Save to png
png_filename = file_str.format(key_, '', res, 'png')
plt.savefig(png_filename, dpi=dpi)
plt.close()
# - What is the area extent of the data
var2use = 'WOA_Nitrate'
ds_lat = ds_tmp[var2use].dropna(dim='lat', how='all')
min_lat = ds_lat['lat'].min() - 2
max_lat = ds_lat['lat'].max() + 2
ds_lon = ds_tmp[var2use].dropna(dim='lon', how='all')
min_lon = ds_lon['lon'].min() - 2
max_lon = ds_lon['lon'].max() + 2
# - Now save by species
vars2save = [i for i in ds_tmp.data_vars if i != 'LonghurstProvince']
for var_ in vars2save:
print(var_)
da = ds_tmp[var_]
# select the minimum area for the areas
da = da.sel(lat=(da.lat >= min_lat))
da = da.sel(lat=(da.lat < max_lat))
            if key_ in ('SubT_NP', 'SubT_SP'):
print('just limiting lat for: {}'.format(key_))
else:
da = da.sel(lon=(da.lon >= min_lon))
da = da.sel(lon=(da.lon < max_lon))
# Save the data to NetCDF.
filename = file_str.format(key_, var_, res, '')
filename = AC.rm_spaces_and_chars_from_str(filename)
da.to_netcdf(filename+'.nc')
# ---------------------------------------------------------------------------
# --------------- Functions for Atmospheric impacts work -------------------
# ---------------------------------------------------------------------------
def Do_analysis_and_mk_plots_for_EGU19_poster():
"""
Driver function for analysis and plotting for EGU poster
"""
# - Get data
# data locations and names as a dictionary
wds = get_run_dict4EGU_runs()
runs = list(sorted(wds.keys()))
# Get emissions
dsDH = GetEmissionsFromHEMCONetCDFsAsDatasets(wds=wds)
# Process the datasets?
# a = [ AC.get_O3_burden( wd=wds[i] ) for i in runs ]
# Get datasets objects from directories and in a dictionary
dsD = {}
for run in runs:
ds = xr.open_dataset(wds[run]+'ctm.nc')
dsD[run] = ds
# - Do analysis
# Get summary emission stats
Check_global_statistics_on_emissions(dsDH=dsDH)
# Look at differences in surface concentration.
extra_str = 'EGU_runs_surface_Iy_stats_'
df = evalulate_burdens_and_surface_conc(run_dict=wds, extra_str=extra_str)
    # Get general statistics about the emissions vs. Macdonald et al 2014
REF1 = 'Macdonald2014'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the emissions vs. Chance et al 2014
REF1 = 'Chance2014'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the emissions vs. the ML_Iodide run
REF1 = 'ML_Iodide'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
    # Get general statistics about the emissions vs. the No_HOI_I2 run
REF1 = 'No_HOI_I2'
extra_str = 'EGU_runs_general_stats_vs_{}_'.format(REF1)
df = AC.get_general_stats4run_dict_as_df(run_dict=wds, REF1=REF1,
extra_str=extra_str)
# - Get spatial plots
# plot up emissions
plot_up_surface_emissions(dsDH=dsDH)
    # - Do differences plots
# - look at the HOI/I2 surface values and IO.
# species to look at?
specs = ['O3', 'NO2', 'IO', 'HOI', 'I2']
# Chance vs. ML_iodide
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Chance2014',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. ML_iodide
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. Chance
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='Chance2014', specs=specs,
update_PyGChem_format2COARDS=True)
# Macdonald vs. No_HOI_I2
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='Macdonald2014',
NEW='No_HOI_I2', specs=specs,
update_PyGChem_format2COARDS=True)
# ML_iodide vs. No_HOI_I2
AC.plot_up_surface_changes_between2runs(ds_dict=dsD, BASE='No_HOI_I2',
NEW='ML_Iodide', specs=specs,
update_PyGChem_format2COARDS=True)
# ds_dict=dsD.copy(); BASE='Macdonald2014'; NEW='ML_Iodide'
# - Get production figures.
# surface ozone figure - made in powerpoint for now...
# Plot up emissions for EGU presentation
BASE = 'ML_Iodide'
DIFF1 = 'Chance2014'
DIFF2 = 'Macdonald2014'
plot_up_EGU_fig05_emiss_change(ds_dict=dsD, BASE=BASE, DIFF1=DIFF1, DIFF2=DIFF2,
update_PyGChem_format2COARDS=True)
def plot_up_EGU_fig05_emiss_change(ds_dict=None, levs=[1], specs=[],
BASE='', DIFF1='', DIFF2='', prefix='IJ_AVG_S__',
update_PyGChem_format2COARDS=False):
"""
Plot up the change in emissions for EGU poster
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# Species to plot
vars2use = [prefix+i for i in specs]
unit = None
PDFfilenameStr = 'Oi_surface_change_{}_vs_{}_lev_{:0>2}'
# Set datasets to use and Just include the variables to plot in the dataset
    title1 = BASE
    title2 = DIFF1
    title3 = DIFF2
    ds1 = ds_dict[BASE][vars2use].copy()
    ds2 = ds_dict[DIFF1][vars2use].copy()
    ds3 = ds_dict[DIFF2][vars2use].copy()
# Average over time
print(ds1, ds2, ds3)
ds1 = ds1.mean(dim='time')
ds2 = ds2.mean(dim='time')
ds3 = ds3.mean(dim='time')
# Remove vestigial coordinates.
# (e.g. the time_0 coord... what is this?)
vars2drop = ['time_0']
dsL = [ds1, ds2, ds3]
for var2drop in vars2drop:
for n, ds in enumerate(dsL):
CoordVars = [i for i in ds.coords]
if var2drop in CoordVars:
ds = ds.drop(var2drop)
dsL[n] = ds
ds1, ds2, ds3 = dsL
# Update dimension names
if update_PyGChem_format2COARDS:
ds1 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds1)
ds2 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds2)
ds3 = Convert_PyGChem_Iris_DataSet2COARDS_NetCDF(ds=ds3)
# Setup plot
# plot up map with mask present
fig = plt.figure(figsize=(10, 6))
vmin = -100
vmax = 100
# Add initial plot
axn = [1, 1, 1]
ax = fig.add_subplot(*axn, projection=ccrs.Robinson(), aspect='auto')
    # NOTE: assumed fix - the original call ('ax.plot.imshow') was incomplete and
    # 'savename' was undefined; plot the first requested species from BASE here.
    ds1[vars2use[0]].plot.imshow(x='lon', y='lat', ax=ax,
                                 vmin=vmin, vmax=vmax,
                                 transform=ccrs.PlateCarree())
    savename = PDFfilenameStr.format(BASE, DIFF1, levs[0])
    plt.title(savename)
    plt.savefig(savename+'.png')
plt.close()
def evalulate_burdens_and_surface_conc(run_dict=None, extra_str='', REF1=None,
REF2=None, REF_wd=None, res='4x5', trop_limit=True,
save2csv=True, prefix='GC_', run_names=None,
debug=False):
"""
Check general statistics on the CTM model runs
"""
# Extract names and locations of data
if isinstance(run_dict, type(None)):
run_dict = get_run_dict4EGU_runs()
if isinstance(run_names, type(None)):
run_names = sorted(run_dict.keys())
wds = [run_dict[i] for i in run_names]
# Mass unit scaling
mass_scale = 1E3
mass_unit = 'Tg'
# v/v scaling?
ppbv_unit = 'ppbv'
ppbv_scale = 1E9
pptv_unit = 'pptv'
pptv_scale = 1E12
# Get shared variables from a single model run
if isinstance(REF_wd, type(None)):
REF_wd = wds[0]
# get time in the troposphere diagnostic
t_p = AC.get_GC_output(wd=REF_wd, vars=[u'TIME_TPS__TIMETROP'],
trop_limit=True)
# Temperature
K = AC.get_GC_output(wd=REF_wd, vars=[u'DAO_3D_S__TMPU'], trop_limit=True)
# airmass
a_m = AC.get_air_mass_np(wd=REF_wd, trop_limit=True)
# Surface area?
s_area = AC.get_surface_area(res)[..., 0] # m2 land map
# ----
# - Now build analysis in pd.DataFrame
#
# - Tropospheric burdens?
# Get tropospheric burden for run
varname = 'O3 burden ({})'.format(mass_unit)
ars = [AC.get_O3_burden(i, t_p=t_p).sum() for i in wds]
df = pd.DataFrame(ars, columns=[varname], index=run_names)
# Get NO2 burden
NO2_varname = 'NO2 burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='NO2', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to N equivalent
ars = [i/AC.species_mass('NO2')*AC.species_mass('N') for i in ars]
df[NO2_varname] = ars
# Get NO burden
NO_varname = 'NO burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='NO', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to N equivalent
ars = [i/AC.species_mass('NO')*AC.species_mass('N') for i in ars]
df[NO_varname] = ars
# Combine NO and NO2 to get NOx burden
NOx_varname = 'NOx burden ({})'.format(mass_unit)
df[NOx_varname] = df[NO2_varname] + df[NO_varname]
# Get HOI burden
varname = 'HOI burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='HOI', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('HOI')*AC.species_mass('I') for i in ars]
df[varname] = ars
# Get I2 burden
varname = 'I2 burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='I2', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('I2')*AC.species_mass('I') for i in ars]
df[varname] = ars
    # Get IO burden
varname = 'IO burden ({})'.format(mass_unit)
ars = [AC.get_trop_burden(spec='IO', t_p=t_p, wd=i, all_data=False).sum()
for i in wds]
# convert to I equivalent
ars = [i/AC.species_mass('IO')*AC.species_mass('I') for i in ars]
df[varname] = ars
# Scale units
for col_ in df.columns:
if 'Tg' in col_:
df.loc[:, col_] = df.loc[:, col_].values/mass_scale
# - Surface concentrations?
# Surface ozone
O3_sur_varname = 'O3 surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='O3', wd=i, s_area=s_area)
for i in wds]
df[O3_sur_varname] = ars
# Surface NOx
NO_sur_varname = 'NO surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='NO', wd=i, s_area=s_area)
for i in wds]
df[NO_sur_varname] = ars
NO2_sur_varname = 'NO2 surface ({})'.format(ppbv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='NO2', wd=i, s_area=s_area)
for i in wds]
df[NO2_sur_varname] = ars
NOx_sur_varname = 'NOx surface ({})'.format(ppbv_unit)
df[NOx_sur_varname] = df[NO2_sur_varname] + df[NO_sur_varname]
# Surface HOI
HOI_sur_varname = 'HOI surface ({})'.format(pptv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='HOI', wd=i, s_area=s_area)
for i in wds]
df[HOI_sur_varname] = ars
# Surface I2
I2_sur_varname = 'I2 surface ({})'.format(pptv_unit)
ars = [AC.get_avg_surface_conc_of_X(spec='I2', wd=i, s_area=s_area)
for i in wds]
df[I2_sur_varname] = ars
    # Surface IO
    IO_sur_varname = 'IO surface ({})'.format(pptv_unit)
    ars = [AC.get_avg_surface_conc_of_X(spec='IO', wd=i, s_area=s_area)
           for i in wds]
    df[IO_sur_varname] = ars
# - Scale units
for col_ in df.columns:
if 'ppbv' in col_:
df.loc[:, col_] = df.loc[:, col_].values*ppbv_scale
if 'pptv' in col_:
df.loc[:, col_] = df.loc[:, col_].values*pptv_scale
# - Processing and save?
# Calculate % change from base case for each variable
if not isinstance(REF1, type(None)):
for col_ in df.columns:
pcent_var = col_+' (% vs. {})'.format(REF1)
df[pcent_var] = (df[col_]-df[col_][REF1]) / df[col_][REF1] * 100
if not isinstance(REF2, type(None)):
for col_ in df.columns:
pcent_var = col_+' (% vs. {})'.format(REF2)
df[pcent_var] = (df[col_]-df[col_][REF2]) / df[col_][REF2] * 100
# Re-order columns
    df = df.reindex(sorted(df.columns), axis=1)
    # Reorder index
    df = df.T.reindex(sorted(df.T.columns), axis=1).T
# Now round the numbers
df = df.round(3)
# Save csv to disk
csv_filename = '{}_summary_statistics{}.csv'.format(prefix, extra_str)
df.to_csv(csv_filename)
# return the DataFrame too
return df
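# Illustrative usage (run locations are assumed to exist):
#   df = evalulate_burdens_and_surface_conc(run_dict=wds, REF1='Macdonald2014')
# which also adds '% vs. Macdonald2014' columns for every burden and
# surface-concentration metric before saving the summary .csv.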
def Check_sensitivity_of_HOI_I2_param2WS():
"""
Check the sensitivity of the Carpenter et al 2013 parameterisation to wind speed
"""
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper", font_scale=1.75)
import matplotlib.pyplot as plt
# Core calculation for HOI emission
def calc_HOI_flux_eqn_20(I=None, O3=None, WS=None, ):
""" Eqn 20 from Carpenter et al 2013 """
return O3 * ((4.15E5 * (np.sqrt(I) / WS)) -
(20.6 / WS) - (2.36E4 * np.sqrt(I)))
# Slightly simpler calculation for HOI emission
def calc_HOI_flux_eqn_21(I=None, O3=None, WS=None, ):
""" Eqn 21 from Carpenter et al 2013 """
return O3 * np.sqrt(I) * ((3.56E5/WS) - 2.16E4)
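    # Illustrative check (input values assumed, not taken from the paper):
    # for I = 100 nM, O3 = 20 ppb and WS = 10 m/s,
    #   calc_HOI_flux_eqn_20(I=100E-9, O3=20, WS=10) ~ 72
    #   calc_HOI_flux_eqn_21(I=100E-9, O3=20, WS=10) ~ 89
    # i.e. the full and simplified forms agree to within roughly 20-25 % here.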
# Plot up values for windspeed
WS_l = np.arange(5, 40, 0.1)
# - plot up
# Eqn 20
Y = [calc_HOI_flux_eqn_20(I=100E-9, O3=20, WS=i) for i in WS_l]
plt.plot(WS_l, Y, label='Eqn 20')
# Eqn 21
Y = [calc_HOI_flux_eqn_21(I=100E-9, O3=20, WS=i) for i in WS_l]
plt.plot(WS_l, Y, label='Eqn 21')
# Update aesthetics of plot and save
    plt.title('HOI flux vs. wind speed')
    plt.ylabel('HOI flux, nmol m$^{-2}$ d$^{-1}$')
    plt.xlabel('Wind speed (m s$^{-1}$)')
plt.legend()
plt.show()
if __name__ == "__main__":
main()
| 38.048607 | 340 | 0.57818 |
f8675dfd4e125d168dde1ba9e29185bd73af107b
| 4,331 |
py
|
Python
|
writer/cashData/csvUtils.py
|
sifarone/gce_k8s_deployment
|
f596e17b9d0263ae24c61ebba9925af4719b4306
|
[
"MIT"
] | null | null | null |
writer/cashData/csvUtils.py
|
sifarone/gce_k8s_deployment
|
f596e17b9d0263ae24c61ebba9925af4719b4306
|
[
"MIT"
] | null | null | null |
writer/cashData/csvUtils.py
|
sifarone/gce_k8s_deployment
|
f596e17b9d0263ae24c61ebba9925af4719b4306
|
[
"MIT"
] | 1 |
2021-01-24T17:07:37.000Z
|
2021-01-24T17:07:37.000Z
|
import pandas as pd
from . import cashUtils as utils
| 36.091667 | 93 | 0.484184 |
f8683ceaf922240bb0a9b5391ea9deb94effc25d
| 253 |
py
|
Python
|
programming/python_in_high_performance_computing/cyt_modules/cyt_setup.py
|
carlosevmoura/courses-notes
|
dc938625dd79267f9a262e7e6939205f63dda885
|
[
"MIT"
] | null | null | null |
programming/python_in_high_performance_computing/cyt_modules/cyt_setup.py
|
carlosevmoura/courses-notes
|
dc938625dd79267f9a262e7e6939205f63dda885
|
[
"MIT"
] | null | null | null |
programming/python_in_high_performance_computing/cyt_modules/cyt_setup.py
|
carlosevmoura/courses-notes
|
dc938625dd79267f9a262e7e6939205f63dda885
|
[
"MIT"
] | null | null | null |
from distutils.core import Extension, setup
from Cython.Build import cythonize
from Cython.Compiler import Options
Options.docstrings = False
ext = Extension(name="cyt_module", sources=["cyt_module.pyx"])
setup(
ext_modules = cythonize(ext),
)
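# Typical usage of this setup script (standard Cython/distutils workflow):
#   python cyt_setup.py build_ext --inplace
# which compiles cyt_module.pyx into an importable extension module.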
| 18.071429 | 62 | 0.766798 |
f86cbd077218ced0fe45ca2c5ef698554acc3ecd
| 18,995 |
py
|
Python
|
server_code.py
|
johnr0/TaleBrush-backend
|
f7429e10f328087444647d5dc6bf1f3a22ccfcce
|
[
"BSD-3-Clause"
] | 1 |
2022-02-25T18:36:16.000Z
|
2022-02-25T18:36:16.000Z
|
server_code.py
|
johnr0/Generative-Input-NLP
|
9607cf2db2aa29f10d4b2179e25dc5bfc9b00288
|
[
"BSD-3-Clause"
] | null | null | null |
server_code.py
|
johnr0/Generative-Input-NLP
|
9607cf2db2aa29f10d4b2179e25dc5bfc9b00288
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
from flask_cors import CORS, cross_origin
import torch
import json
import numpy as np
import torch
from modeling_gptneo import GPTNeoForCausalLM
from modeling_gpt2 import GPT2LMHeadModel
from transformers import (
GPTNeoConfig,
GPT2Config,
GPT2Tokenizer
)
import transformers
from nltk import sent_tokenize
import nltk
nltk.download('punkt')
### Loading the model
code_desired = "true"
code_undesired = "false"
model_type = 'gpt2'
gen_type = "gedi"
gen_model_name_or_path = "EleutherAI/gpt-neo-2.7B"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MODEL_CLASSES = {"gptneo": (GPTNeoConfig, GPTNeoForCausalLM, GPT2Tokenizer), "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),}
config_class_n, model_class_n, tokenizer_class_n = MODEL_CLASSES["gptneo"]
config_class_2, model_class_2, tokenizer_class_2 = MODEL_CLASSES["gpt2"]
tokenizer = tokenizer_class_n.from_pretrained('EleutherAI/gpt-neo-2.7B', do_lower_case=False, additional_special_tokens=['[Prompt]'])
model = model_class_n.from_pretrained(gen_model_name_or_path, load_in_half_prec=True)
model = model.to(device)
model = model.float()
model.config.use_cache=True
model.resize_token_embeddings(len(tokenizer))
gedi_model_name_or_path = 'fortune_gedi'
gedi_model = model_class_2.from_pretrained(gedi_model_name_or_path)
gedi_model.to(device)
gedi_model.resize_token_embeddings(len(tokenizer))
gedi_model.resize_token_embeddings(50258)
wte = gedi_model.get_input_embeddings()
wte.weight.requires_grad=False
wte.weight[len(tokenizer)-1, :]= wte.weight[len(tokenizer)-2, :]
gedi_model.set_input_embeddings(wte)
embed_cont = torch.load('./result_embedding_cont')
embed_infill_front = torch.load('./result_embedding_infill_front')
embed_infill_back = torch.load('./result_embedding_infill_back')
embed_recognition = torch.load('./result_embedding_recognition')
recognition_score = torch.load('./recog_score')
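# Note: the four checkpoints above are task-specific input embeddings; the
# relevant one is swapped in via model.set_input_embeddings() before continuation
# or infilling (and, presumably, recognition) - see the generation functions below.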
model.set_input_embeddings(embed_cont.wte)
# setting arguments for generation
#max generation length
gen_length = 40
#omega from paper, higher disc_weight means more aggressive topic steering
disc_weight = 30
#1 - rho from paper, should be between 0 and 1 higher filter_p means more aggressive topic steering
filter_p = 0.8
#tau from paper, preserves tokens that are classified as correct topic
target_p = 0.8
#hyperparameter that determines class prior, set to uniform by default
class_bias = 0
if gen_length>1024:
length = 1024
else:
length = gen_length
def cut_into_sentences(text, do_cleanup=True):
"""
    Cut text into sentences. Newline characters are also treated as sentence boundaries.
:param do_cleanup: if True, do cleanups.
:param text: input text.
:return: sentences.
"""
all_sentences = []
# print(text)
# sentences_raw = text.split("\n")
text = text.replace("[Prompt] [Prompt] [Prompt] [Prompt] ", "[Prompt] [Prompt] [Prompt] ")
sentences_raw = text.split('[Prompt] [Prompt] [Prompt]')
text = sentences_raw[len(sentences_raw)-1]
text = text.replace("Start:", " ")
text = text.replace("Characters:", " ")
text = text.replace("Story after start:", " ")
sentences_raw = [text.replace("\n", " ")]
result = []
for item in sentences_raw:
sentence_in_item = sent_tokenize(item)
for item2 in sentence_in_item:
all_sentences.append(item2.strip())
if do_cleanup:
for item in all_sentences:
item = item.replace('<|endoftext|>', '')
if len(item) > 2:
result.append(item)
else:
result = all_sentences
return result
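# Illustrative example (hypothetical input): for
#   "[Prompt] [Prompt] [Prompt] Story after start: It rained. She left."
# the prompt markers and headers are stripped and the function returns
#   ["It rained.", "She left."]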
def generate_one_sentence(sentence, control, length=50, disc_weight=30, temperature=0.8, gpt3_id=None):
"""
Generate one sentence based on input data.
:param sentence: (string) context (prompt) used.
    :param control: (str or dict) control code, or {topic: weight, ...} topics that the sentence needs to steer towards.
    :param length: (int) maximum number of new tokens to generate.
    :param disc_weight: set this value to use a different control strength than the default.
    :param temperature: sampling temperature.
    :param gpt3_id: if given, an OpenAI API key so GPT-3 is used as the base generator instead of the local model.
    :return: the first sentence generated.
"""
secondary_code = control
if sentence == "":
print("Prompt is empty! Using a dummy sentence.")
sentence = "."
# Specify prompt below
prompt = sentence
    # Calculate original input length.
length_of_prompt = len(sentence)
start_len = 0
text_ids = tokenizer.encode(prompt)
length_of_prompt_in_tokens = len(text_ids)
# print('text ids', text_ids)
encoded_prompts = torch.LongTensor(text_ids).unsqueeze(0).to(device)
if type(control) is str:
multi_code = tokenizer.encode(secondary_code)
elif type(control) is dict:
multi_code = {}
for item in secondary_code:
encoded = tokenizer.encode(item)[0] # only take the first one
multi_code[encoded] = secondary_code[item]
else:
raise NotImplementedError("topic data type of %s not supported... Supported: (str,dict)" % type(control))
# If 1, generate sentences towards a specific topic.
attr_class = 1
print(multi_code)
if int(control)!=-1:
if gpt3_id is None:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=gedi_model,
tokenizer=tokenizer,
disc_weight=disc_weight,
filter_p=filter_p,
target_p=target_p,
class_bias=class_bias,
attr_class=attr_class,
code_0=code_undesired,
code_1=code_desired,
multi_code=multi_code,
)
else:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=gedi_model,
tokenizer=tokenizer,
disc_weight=disc_weight,
filter_p=filter_p,
target_p=target_p,
class_bias=class_bias,
attr_class=attr_class,
code_0=code_undesired,
code_1=code_desired,
multi_code=multi_code,
gpt3_api_key=gpt3_id,
)
text = tokenizer.decode(generated_sequence.tolist()[0])
else:
if gpt3_id is None:
generated_sequence = model.generate(input_ids=encoded_prompts,
pad_lens=None,
max_length=length + length_of_prompt_in_tokens,
top_k=None,
top_p=None,
repetition_penalty=1.2,
rep_penalty_scale=10,
eos_token_ids=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
bad_token_ids = tokenizer.all_special_ids,
do_sample=True,
temperature = temperature,
penalize_cond=True,
gedi_model=None,
tokenizer=tokenizer,
disc_weight=disc_weight,
class_bias=class_bias,
attr_class=attr_class,
)
text = tokenizer.decode(generated_sequence.tolist()[0])
else:
import openai
openai.api_key = gpt3_id
completion = openai.Completion()
response = completion.create(prompt=prompt,
engine="curie",
max_tokens=length,
temperature=temperature,)
text = response["choices"][0]["text"]
text = cut_into_sentences(text)
if len(text) == 0:
print("Warning! No text generated.")
return ""
all_gen_text = text[0]
return all_gen_text
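# Illustrative call (assumes the models loaded above are available):
#   generate_one_sentence("She opened the door.", control="5", temperature=0.8)
# returns the first generated sentence, steered by the GeDi discriminator towards
# the requested control code; passing control="-1" disables GeDi steering.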
import numpy as np
def continuing_generation(prompts, generation_controls, characters, temperatures, gpt3_id=None, disc_weight=30):
"""
Explanations on controls
prompts: The prompt to be input. This is a list of sentences.
generation_controls: Generation control in the list. If no control is given, -1 is given.
"""
model.set_input_embeddings(embed_cont)
prompts = list(prompts)
generated = []
character_prepend = '[Prompt][Prompt][Prompt]'
for idx, character in enumerate(characters):
if idx==0:
character_prepend = character_prepend+character
else:
character_prepend = character_prepend+' '+character
if idx != len(characters)-1:
character_prepend = character_prepend + ','
prompt_start_idx = 0
for c_idx, generation_control in enumerate(generation_controls):
temperature = temperatures[c_idx]
while True:
prompt_postpend = '[Prompt][Prompt][Prompt]'
# prompt_postpend = 'Story: '
for i in range(prompt_start_idx, len(prompts)):
prompt_postpend = prompt_postpend + prompts[i]
if i != len(prompts)-1:
prompt_postpend = prompt_postpend + ' '
# continue
else:
prompt_postpend = prompt_postpend
prompt_input = prompt_postpend+character_prepend+ '[Prompt][Prompt][Prompt]'
prompt_encoded = tokenizer.encode(prompt_input)
length_of_prompt_in_tokens = len(prompt_encoded)
if length_of_prompt_in_tokens>2048:
prompt_start_idx = prompt_start_idx + 1
else:
break
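        # (the loop above drops the oldest context sentences until the encoded
        #  prompt fits within the generator's 2048-token context window)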
print(prompt_input, generation_control)
gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
prompts.append(gen_sent)
generated.append(gen_sent)
for gen in generated:
print('gen:', gen)
print()
return generated
def infilling_generation(pre_prompts, post_prompts, generation_controls, characters, temperatures, is_front, gpt3_id=None, disc_weight=30):
"""
Explanations on controls
    pre_prompts / post_prompts: the context before / after the gap, each a list of sentences.
generation_controls: Generation control in the list. If no control is given, -1 is given.
"""
pre_prompts = list(pre_prompts)
post_prompts = list(post_prompts)
right = ''
for idx, pp in enumerate(post_prompts):
right = right + pp
if idx!=len(post_prompts)-1:
right = right + ' '
left = ''
for idx, pp in enumerate(pre_prompts):
left = left + pp
if idx!=len(post_prompts)-1:
left = left + ' '
generated = ['']*len(generation_controls)
# gen_counter = 0
for gen_counter in range(len(generation_controls)):
if is_front:
generation_control = generation_controls[int(gen_counter/2)]
temperature = temperatures[int(gen_counter/2)]
model.set_input_embeddings(embed_infill_front)
prompt_input = '[Prompt][Prompt][Prompt]'+right+'[Prompt][Prompt][Prompt]'+left+'[Prompt][Prompt][Prompt][Prompt]'
gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
generated[int(gen_counter/2)] =gen_sent
print(gen_sent)
left = left + ' ' + gen_sent
else:
generation_control = generation_controls[len(generated)-1-int(gen_counter/2)]
temperature = temperatures[len(generated)-1-int(gen_counter/2)]
model.set_input_embeddings(embed_infill_back)
prompt_input = '[Prompt][Prompt][Prompt]'+left+'[Prompt][Prompt][Prompt]'+right + '[Prompt][Prompt][Prompt][Prompt]'
gen_sent = generate_one_sentence(prompt_input, generation_control, temperature=temperature, gpt3_id=gpt3_id, disc_weight=disc_weight)
generated[len(generated)-1-int(gen_counter/2)] =gen_sent
print(gen_sent)
right = gen_sent+' '+right
for gen in generated:
print('gen', gen)
print()
return generated
app = FlaskAPI(__name__)
# run_with_ngrok(app)
CORS(app, resources={r"/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
# Below is a temporary function using sentiment analysis.
# Hence, it needs to be updated later.
if __name__=="__main__":
app.run(host='0.0.0.0', port=11080)
| 41.025918 | 140 | 0.588944 |
f86d0468889ac52f5ce1040fe21e913a6db95f94
| 20,391 |
py
|
Python
|
pymatflow/abinit/post/bands.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6 |
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/abinit/post/bands.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1 |
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/abinit/post/bands.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1 |
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
"""
post_bands:
    post_bands extracts data from static-o_DS3_EBANDS.agr and builds
    the k-point path length (xcoord_k) from the high-symmetry line and the
    corresponding basis for reciprocal space.
b1 = 1 / a1, b2 = 1 / a2 and b3 = 1 / a3.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
| 51.492424 | 142 | 0.529106 |
f86d356d798352c0185a9ec2592dc21b131a7ed8
| 337 |
py
|
Python
|
logconfig.py
|
Erick-Faster/gerbot-api
|
36d723c7e9df525b99fd4eff2da318e9046e7734
|
[
"Apache-2.0"
] | null | null | null |
logconfig.py
|
Erick-Faster/gerbot-api
|
36d723c7e9df525b99fd4eff2da318e9046e7734
|
[
"Apache-2.0"
] | null | null | null |
logconfig.py
|
Erick-Faster/gerbot-api
|
36d723c7e9df525b99fd4eff2da318e9046e7734
|
[
"Apache-2.0"
] | null | null | null |
import logging
import logging.config
logging.config.fileConfig('./instance/logging.conf')
# create logger
logger = logging.getLogger('Cognitive-API')
# 'application' code
'''
logger.debug('debug message')
logger.info('info message')
logger.warning('warn message')
logger.error('error message')
logger.critical('critical message')
'''
| 19.823529 | 52 | 0.756677 |
f86db685725dd6affbd6d16efda49f2dd028eb93
| 1,735 |
py
|
Python
|
tests/app/test_app_service.py
|
0604hx/buter
|
670584e7c39c985192684c9f68f52fc69c57049c
|
[
"MIT"
] | 2 |
2017-11-21T10:00:47.000Z
|
2018-02-02T04:40:09.000Z
|
tests/app/test_app_service.py
|
0604hx/buter
|
670584e7c39c985192684c9f68f52fc69c57049c
|
[
"MIT"
] | 1 |
2018-10-31T06:56:22.000Z
|
2018-11-01T00:58:16.000Z
|
tests/app/test_app_service.py
|
0604hx/buter
|
670584e7c39c985192684c9f68f52fc69c57049c
|
[
"MIT"
] | 5 |
2017-12-14T01:07:21.000Z
|
2020-04-29T02:21:46.000Z
|
import json
import unittest
from buter.app.services import load_from_file, detect_app_name
from buter.server import docker
from buter.util.Utils import unzip
from config import getConfig
if __name__ == '__main__':
unittest.main()
| 30.982143 | 109 | 0.609222 |
f86f8495a3b204ecbbc51199ca2187879cae3c8e
| 397 |
py
|
Python
|
code/level6.py
|
ab300819/PythonChallenge
|
4bcc91f8b11d0a5ec5720137bef55eec6b1f7581
|
[
"Apache-2.0"
] | null | null | null |
code/level6.py
|
ab300819/PythonChallenge
|
4bcc91f8b11d0a5ec5720137bef55eec6b1f7581
|
[
"Apache-2.0"
] | null | null | null |
code/level6.py
|
ab300819/PythonChallenge
|
4bcc91f8b11d0a5ec5720137bef55eec6b1f7581
|
[
"Apache-2.0"
] | null | null | null |
# -*-coding:utf-8-*-
__author__ = 'Mason'
import re
import zipfile
z = zipfile.ZipFile('channel.zip', mode='r')
number = '90052'
comments = []
while True:
text = z.read(number + '.txt')
number = re.findall('([0-9]+)', text)
print number
try:
number = number[0]
comments.append(z.getinfo(number + '.txt').comment)
except:
break
print ''.join(comments)
| 19.85 | 59 | 0.594458 |
f870be2bd112b621b44e0d7642b1d268ee31edf5
| 728 |
py
|
Python
|
subscriptions/subscription.py
|
iamsharmaapoorv/availability-checker
|
02fc28f495140f74fa38c02a3e4a5111e196151f
|
[
"MIT"
] | null | null | null |
subscriptions/subscription.py
|
iamsharmaapoorv/availability-checker
|
02fc28f495140f74fa38c02a3e4a5111e196151f
|
[
"MIT"
] | null | null | null |
subscriptions/subscription.py
|
iamsharmaapoorv/availability-checker
|
02fc28f495140f74fa38c02a3e4a5111e196151f
|
[
"MIT"
] | null | null | null |
from products.product import Product
from notifications.notification import Notification
from clients.client import Client
| 33.090909 | 67 | 0.696429 |
f871c0ad8b9204fef05550a10cc4ceb534586079
| 654 |
py
|
Python
|
joi2008yo/joi2008yo_e.py
|
Vermee81/practice-coding-contests
|
78aada60fa75f208ee0eef337b33b27b1c260d18
|
[
"MIT"
] | null | null | null |
joi2008yo/joi2008yo_e.py
|
Vermee81/practice-coding-contests
|
78aada60fa75f208ee0eef337b33b27b1c260d18
|
[
"MIT"
] | null | null | null |
joi2008yo/joi2008yo_e.py
|
Vermee81/practice-coding-contests
|
78aada60fa75f208ee0eef337b33b27b1c260d18
|
[
"MIT"
] | null | null | null |
# https://atcoder.jp/contests/joi2008yo/tasks/joi2008yo_e
R, C = list(map(int, input().split()))
senbei_pos = []
ans = 0
for _ in range(R):
pos = list(map(int, input().split()))
senbei_pos.append(pos)
for bit in range(2**R):
total = 0
copied_pos = senbei_pos[:]
    # R is at most 10, so encode which rows to flip as a zero-padded 10-bit string
flip_row_pos = list(format(bit, '010b'))
for j in range(C):
column = [p[j] for p in copied_pos]
one_count = sum([column[k] ^ int(flip_row_pos[10 - R + k])
for k in range(R)])
zero_count = R - one_count
total += max(zero_count, one_count)
ans = max(ans, total)
print(ans)
| 29.727273 | 66 | 0.59633 |
f8724ce5a5705922dd55fcf91b7512b691dc8ab7
| 2,850 |
py
|
Python
|
yttgmp3.py
|
RomaniukVadim/ytmp3_bot
|
ce3cc3cfa2098257e4ec22c019c8c33d31a73128
|
[
"WTFPL"
] | 1 |
2018-03-27T00:08:26.000Z
|
2018-03-27T00:08:26.000Z
|
yttgmp3.py
|
RomaniukVadim/ytmp3_bot
|
ce3cc3cfa2098257e4ec22c019c8c33d31a73128
|
[
"WTFPL"
] | null | null | null |
yttgmp3.py
|
RomaniukVadim/ytmp3_bot
|
ce3cc3cfa2098257e4ec22c019c8c33d31a73128
|
[
"WTFPL"
] | 1 |
2020-06-04T02:49:20.000Z
|
2020-06-04T02:49:20.000Z
|
#!/usr/env python3
import requests
import os
import glob
import telegram
from time import sleep
token = "token"
bot = telegram.Bot(token=token)
# The bot takes a link, runs `youtube-dl -x --audio-format mp3 <link>` via bash,
# and sends back the resulting mp3.
mp3_bot = BotHandler(token)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit()
| 30.978261 | 138 | 0.597895 |
f873639a13e98ee3a4151d1be3542d91c969ac64
| 530 |
py
|
Python
|
djangobmf/contrib/team/views.py
|
dmatthes/django-bmf
|
3a97167de7841b13f1ddd23b33ae65e98dc49dfd
|
[
"BSD-3-Clause"
] | 1 |
2020-05-11T08:00:49.000Z
|
2020-05-11T08:00:49.000Z
|
djangobmf/contrib/team/views.py
|
dmatthes/django-bmf
|
3a97167de7841b13f1ddd23b33ae65e98dc49dfd
|
[
"BSD-3-Clause"
] | null | null | null |
djangobmf/contrib/team/views.py
|
dmatthes/django-bmf
|
3a97167de7841b13f1ddd23b33ae65e98dc49dfd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from djangobmf.views import ModuleCreateView
from djangobmf.views import ModuleUpdateView
from djangobmf.views import ModuleDetailView
from .forms import BMFTeamUpdateForm
from .forms import BMFTeamCreateForm
| 22.083333 | 44 | 0.828302 |
f873731d39e77de62eb053df48244e290afd54de
| 1,038 |
py
|
Python
|
py/LSS/imaging/veto_masks/lrg/lrg_wise_mask_v1.py
|
echaussidon/LSS
|
205ce48a288acacbd41358e6d0215f4aff355049
|
[
"BSD-3-Clause"
] | null | null | null |
py/LSS/imaging/veto_masks/lrg/lrg_wise_mask_v1.py
|
echaussidon/LSS
|
205ce48a288acacbd41358e6d0215f4aff355049
|
[
"BSD-3-Clause"
] | null | null | null |
py/LSS/imaging/veto_masks/lrg/lrg_wise_mask_v1.py
|
echaussidon/LSS
|
205ce48a288acacbd41358e6d0215f4aff355049
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function
import sys, os, glob, time, warnings, gc
# import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table, vstack, hstack
import fitsio
from astropy.io import fits
from scipy.interpolate import interp1d
output_path = '/global/cfs/cdirs/desi/users/rongpu/desi_mask/w1_bright-2mass-lrg_mask_v1.fits'
# WISE mask
w1_mags = [0, 0.5, 1, 1.5, 2, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0]
w1_radii = [600, 600, 550, 500, 475, 425, 400, 400, 390, 392.5, 395, 370, 360, 330, 275, 240, 210, 165, 100, 75, 60]
w1_max_mag = 10.0
f_radius = interp1d(w1_mags, w1_radii, bounds_error=False, fill_value='extrapolate')
wise_path = '/global/cfs/cdirs/desi/users/rongpu/desi_mask/w1_bright-2mass-13.3-dr9.fits'
wise = Table(fitsio.read(wise_path))
# print(len(wise))
wise['w1ab'] = np.array(wise['W1MPRO']) + 2.699
mask = wise['w1ab']<w1_max_mag
wise['radius'] = 0.
wise['radius'][mask] = f_radius(wise['w1ab'][mask])
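# e.g. a W1 (AB) = 5.0 source is assigned a mask radius of 395 (same units as the
# w1_radii table above); sources fainter than w1_max_mag keep a radius of 0.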
wise.write(output_path)
| 33.483871 | 116 | 0.706166 |
f87515fbbdca8d3d26053fb65bc3d5ece4d188b8
| 290 |
py
|
Python
|
cursoDePythonNaPratica/aula18 - telegram.py
|
wemerson-henrique/kivy
|
3cb6061a2d19b01e86c3738206f30c8a853763d4
|
[
"MIT"
] | null | null | null |
cursoDePythonNaPratica/aula18 - telegram.py
|
wemerson-henrique/kivy
|
3cb6061a2d19b01e86c3738206f30c8a853763d4
|
[
"MIT"
] | null | null | null |
cursoDePythonNaPratica/aula18 - telegram.py
|
wemerson-henrique/kivy
|
3cb6061a2d19b01e86c3738206f30c8a853763d4
|
[
"MIT"
] | null | null | null |
import telepot
# I have not created a Telegram bot yet, so this code does not work
# TODO: Create a Telegram bot and get the key
bot = telepot.Bot("Aqui vai minha chave do Telegram")
bot.message_loop(recebendoMsg)
while True:
pass
| 20.714286 | 74 | 0.741379 |
f875e138fd658884c3bfbd92197a369b04338ea0
| 4,590 |
py
|
Python
|
cembot/languages/EN.py
|
niksart/cembot
|
99ec3067bde5b8b72053dd18caa18742afba6a5e
|
[
"MIT"
] | null | null | null |
cembot/languages/EN.py
|
niksart/cembot
|
99ec3067bde5b8b72053dd18caa18742afba6a5e
|
[
"MIT"
] | 15 |
2018-08-30T13:56:27.000Z
|
2021-07-21T08:58:03.000Z
|
cembot/languages/EN.py
|
niksart/cembot
|
99ec3067bde5b8b72053dd18caa18742afba6a5e
|
[
"MIT"
] | null | null | null |
# Support for english (EN) language
helper_commands = {
"AUTHORIZE": "Usage:\n/authorize @<username>\n/authorize <user id>",
"DEAUTHORIZE": "Usage:\n/deauthorize @<username>\n/deauthorize <user id>",
"GIVEN": "Usage:\n/given <amount> @<username> <description>",
"SPENT": "Usage:\n/spent <amount> <description>.\nPayees are all the members of the group, including the payer.",
"MYID": "Usage: /myid\nshow your user id, useful if you have no username",
"START": "Show the initial message",
"LAST_GROUP_EXPENSES": "See the last expenses in a group. \n"
"Usage:\n"
" /last_expenses (show max 5 expenses)\n"
" /last_expenses <n max expenses to show>",
"LAST_CHARGES": "Use this command in private chat to see the last charges on your cembot account. \n"
"Usage:\n"
" /last_charges (show max 5 charges)\n"
" /last_charges <n max charges to show>",
"LAST_LOANS": "Use this command in private chat to see the last loans you did \n"
"Usage:\n"
" /last_loans (show max 5 loans)\n"
" /last loans <n max loans to show>"
}
info = {
"start": missing_translation("start"),
"guide": missing_translation("start"),
"introduced_in_group": "Hello everyone!\nI'm cembot, and I'll help you administrating your expenses!\n"
"Each member of this group now should introduce yourself. "
"People added after this message can avoid to introduce themselves.\n"
"Do it with the command /hereIam",
"each_member_introduced": missing_translation("each_member_introduced"),
"person_missing": "1 person is missing.",
"people_missing": " people are missing.",
"transaction_succeed": "Transaction added successfully!",
"authorized_confirm(user)": "User @%s has been authorized.",
"deauthorized_confirm(user)": "The authorization of user @%s has been revoked.",
"your_id_is(id)": "Your Telegram id is %s. You can add in Telegram settings an username and use cembot more easily.",
"balance_with_other_user(user,balance)": "Your balance with the user %s is %s",
"header_balance_credit": " Credits\n",
"header_balance_debit": " Debits\n",
"commands": missing_translation("commands"),
"these_are_the_last_group_expenses": missing_translation("these_are_the_last_group_expenses"),
"these_are_the_last_individual_charges": missing_translation("these_are_the_last_individual_charges"),
"these_are_the_last_group_charges": missing_translation("these_are_the_last_group_charges"),
"no_charges_yet": missing_translation("no_charges_yet"),
"these_are_the_last_individual_loans": missing_translation("these_are_the_last_individual_loans"),
"these_are_the_last_group_loans": missing_translation("these_are_the_last_group_loans")
}
error = {
"command_unavailable_for_private": "For using this command open a private chat with @en_cembot.",
"command_unavailable_for_group": "For using this command add @en_cembot in a group.",
"amount_money_not_valid": "Amount of money not valid.",
"waiting_for_all_users": "Someone did not present themselves yet.\n"
"Present yourself with /hereIam before adding expenses.",
"lack_of_authorization(user)": "The user @%s has not authorized you for charging expenses.",
"user_unregistered(user)": "The user @%s that you want to add as a payee is not registered on our system",
"can't_deauthorize_cause_not_authorized_yet": "You have not already authorized this user. You can't deauthorize it.",
"have_authorized_yet_this_user": "You have already authorized this user.",
"maybe_you_wrote_an_username_instead_id": "This is not a numeric id. If you intended to write an username write it with a @ at the beginning.",
"insert_a_correct_number": "Insert a correct number and retry"
}
# commands
private_commands = {
"start": "START",
"commands": "COMMANDS",
"authorize": "AUTHORIZE",
"revoke": "DEAUTHORIZE",
"given": "GIVEN",
"myid": "MYID",
"balance": "BALANCE",
"last_charges": "LAST_CHARGES",
"last_loans": "LAST_LOANS",
"guide": "GUIDE"
}
group_commands = {
"spent": "SPENT",
"spent@en_cembot": "SPENT", # version with @[language]_cembot
"hereIam": "PRESENTATION",
"hereIam@en_cembot": "PRESENTATION", # version with @[language]_cembot
"last_expenses": "LAST_GROUP_EXPENSES",
"last_expenses@en_cembot": "LAST_GROUP_EXPENSES", # version with @[language]_cembot
}
| 49.891304 | 144 | 0.70305 |