Dataset columns (⌀ = nullable):
hexsha: stringlengths 40-40
size: int64 5-2.06M
ext: stringclasses 11 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3-251
max_stars_repo_name: stringlengths 4-130
max_stars_repo_head_hexsha: stringlengths 40-78
max_stars_repo_licenses: listlengths 1-10
max_stars_count: int64 1-191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24-24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24-24 ⌀
max_issues_repo_path: stringlengths 3-251
max_issues_repo_name: stringlengths 4-130
max_issues_repo_head_hexsha: stringlengths 40-78
max_issues_repo_licenses: listlengths 1-10
max_issues_count: int64 1-116k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24-24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24-24 ⌀
max_forks_repo_path: stringlengths 3-251
max_forks_repo_name: stringlengths 4-130
max_forks_repo_head_hexsha: stringlengths 40-78
max_forks_repo_licenses: listlengths 1-10
max_forks_count: int64 1-105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24-24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24-24 ⌀
content: stringlengths 1-1.05M
avg_line_length: float64 1-1.02M
max_line_length: int64 3-1.04M
alphanum_fraction: float64 0-1
0a79b3e5980fa9ccf32a0d7267aad362eafb93af | 39,389 | py | Python
max_stars: test/dialect/mssql/test_compiler.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | ["MIT"] | 1 | 2018-11-15T16:02:17.000Z | 2018-11-15T16:02:17.000Z
max_issues: test/dialect/mssql/test_compiler.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | ["MIT"] | null | null | null
max_forks: test/dialect/mssql/test_compiler.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | ["MIT"] | null | null | null
# -*- encoding: utf-8
from sqlalchemy.testing import eq_, is_
from sqlalchemy import schema
from sqlalchemy.sql import table, column, quoted_name
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import mxodbc
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import sql
from sqlalchemy import Integer, String, Table, Column, select, MetaData,\
update, delete, insert, extract, union, func, PrimaryKeyConstraint, \
UniqueConstraint, Index, Sequence, literal
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import base
| 39.193035 | 79 | 0.514991 |
0a7c17bb65b9c51d7ea399323ecb512289bae204 | 8,155 | py | Python
max_stars: sdk/python/pulumi_kubernetes/coordination/v1/_inputs.py | polivbr/pulumi-kubernetes | 36a5fb34240a38a60b52a5f4e55e66e248d9305f | ["Apache-2.0"] | 277 | 2018-06-18T14:57:09.000Z | 2022-03-29T04:05:06.000Z
max_issues: sdk/python/pulumi_kubernetes/coordination/v1/_inputs.py | polivbr/pulumi-kubernetes | 36a5fb34240a38a60b52a5f4e55e66e248d9305f | ["Apache-2.0"] | 1,447 | 2018-06-20T00:58:34.000Z | 2022-03-31T21:28:43.000Z
max_forks: sdk/python/pulumi_kubernetes/coordination/v1/_inputs.py | polivbr/pulumi-kubernetes | 36a5fb34240a38a60b52a5f4e55e66e248d9305f | ["Apache-2.0"] | 95 | 2018-06-30T03:30:05.000Z | 2022-03-29T04:05:09.000Z
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
__all__ = [
'LeaseSpecArgs',
'LeaseArgs',
]
| 46.073446 | 335 | 0.682649 |
0a7c48d84a538009f1d4846a3bf1ffec3626caf1 | 1,005 | py | Python
max_stars: Components/Align All Components.py | davidtahim/Glyphs-Scripts | 5ed28805b5fe03c63d904ad2f79117844c22aa44 | ["Apache-2.0"] | 1 | 2021-09-04T18:41:30.000Z | 2021-09-04T18:41:30.000Z
max_issues: Components/Align All Components.py | davidtahim/Glyphs-Scripts | 5ed28805b5fe03c63d904ad2f79117844c22aa44 | ["Apache-2.0"] | null | null | null
max_forks: Components/Align All Components.py | davidtahim/Glyphs-Scripts | 5ed28805b5fe03c63d904ad2f79117844c22aa44 | ["Apache-2.0"] | null | null | null
#MenuTitle: Align All Components
# -*- coding: utf-8 -*-
__doc__="""
Fakes auto-alignment in glyphs that cannot be auto-aligned.
"""
import GlyphsApp
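# NOTE: process() is called in the loop below but its definition is missing
# from this truncated row. The following is a hypothetical minimal stand-in
# (Glyphs scripting API assumed; not the script's original implementation)
# that stacks components left to right by their base layers' widths.
def process(thisLayer):
    x = 0.0
    for thisComponent in thisLayer.components:
        thisComponent.position = (x, 0.0)
        x += thisComponent.component.layers[thisFontMasterID].width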
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
thisFontMasterID = thisFont.selectedFontMaster.id # active master
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for thisLayer in listOfSelectedLayers:
thisGlyph = thisLayer.parent
print "Aligning components in:", thisGlyph.name
thisGlyph.beginUndo() # begin undo grouping
process( thisLayer )
thisGlyph.endUndo() # end undo grouping
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
| 32.419355 | 81 | 0.78607 |
0a7c6f49614d6822678c761e9a25fddc34bcb0a8 | 818 | py | Python
max_stars: SC101Lecture_code/SC101_week4/draw_basic.py | Jewel-Hong/SC-projects | 9502b3f0c789a931226d4ce0200ccec56e47bc14 | ["MIT"] | null | null | null
max_issues: SC101Lecture_code/SC101_week4/draw_basic.py | Jewel-Hong/SC-projects | 9502b3f0c789a931226d4ce0200ccec56e47bc14 | ["MIT"] | null | null | null
max_forks: SC101Lecture_code/SC101_week4/draw_basic.py | Jewel-Hong/SC-projects | 9502b3f0c789a931226d4ce0200ccec56e47bc14 | ["MIT"] | null | null | null
#!/usr/bin/env python3
"""
Stanford CS106AP
TK Drawing Lecture Exercises
Courtesy of Nick Parlante
"""
import tkinter as tk
# provided function, this code is complete
def make_canvas(width, height):
"""
Creates and returns a drawing canvas
of the given int size, ready for drawing.
"""
top = tk.Tk()
top.minsize(width=width + 10, height=height + 10)
canvas = tk.Canvas(top, width=width, height=height)
canvas.pack()
canvas.xview_scroll(6, "units") # hack so (0, 0) works correctly
canvas.yview_scroll(6, "units")
return canvas
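# NOTE: main() is referenced below but not shown in this truncated row.
# A minimal hypothetical main() exercising make_canvas() could look like:
def main():
    canvas = make_canvas(400, 300)
    canvas.create_line(0, 0, 400, 300)  # tk.Canvas drawing primitive
    tk.mainloop()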
if __name__ == '__main__':
main()
| 21.526316 | 69 | 0.656479 |
0a7cd64e2508df91e539f1a6f804bc5eb4b0ea83 | 12,372 | py | Python
max_stars: audio/audio_server.py | artigianitecnologici/marrtino_apps | b58bf4daa1d06db2f1c8a47be02b29948d41f48d | ["BSD-4-Clause"] | null | null | null
max_issues: audio/audio_server.py | artigianitecnologici/marrtino_apps | b58bf4daa1d06db2f1c8a47be02b29948d41f48d | ["BSD-4-Clause"] | null | null | null
max_forks: audio/audio_server.py | artigianitecnologici/marrtino_apps | b58bf4daa1d06db2f1c8a47be02b29948d41f48d | ["BSD-4-Clause"] | null | null | null
# Only PCM 16 bit wav 44100 Hz - Use audacity or sox to convert audio files.
# WAV generation
# Synth
# sox -n --no-show-progress -G --channels 1 -r 44100 -b 16 -t wav bip.wav synth 0.25 sine 800
# sox -n --no-show-progress -G --channels 1 -r 44100 -b 16 -t wav bop.wav synth 0.25 sine 400
# Voices
# pico2wave -l "it-IT" -w start.wav "Bene! Si Parte!"
# Then convert wav files to 44100 Hz
# Note: some initial sound may not be played.
# alsaaudio examples
# https://larsimmisch.github.io/pyalsaaudio/libalsaaudio.html
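# Example sketch: converting a file to the required 44100 Hz / mono / 16-bit
# PCM format with the pysox wrapper imported below (file names here are
# hypothetical, not part of the original script):
#   tfm = sox.Transformer()
#   tfm.convert(samplerate=44100, n_channels=1, bitdepth=16)
#   tfm.build('start.wav', 'start_44100.wav')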
import threading
import time
import socket
import sys, os, platform
import re
import wave
import argparse
import rospy
use_sound_play = False
use_alsaaudio = True
try:
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient
except:
print('ROS package sound_play required.')
print('Install with: sudo apt-get install ros-kinetic-audio-common libasound2')
use_sound_play = False
#sys.exit(0)
try:
import sox
except:
print('sox required. Install with: pip install --user sox')
sys.exit(0)
try:
import alsaaudio
except:
print('alsaaudio required. Install with: pip install --user pyalsaaudio')
use_alsaaudio = False
#sys.exit(0)
from asr_server import ASRServer
SOUNDS_DIR = "sounds/" # dir with sounds
soundfile = None # sound file
tts_server = None
asr_server = None
# def playwav_pa(self, sfile):
# global soundfile
# self.streaming = True
# self.stream = self.pa.open(format = 8, #self.pa.get_format_from_width(f.getsampwidth#()),
# channels = 1, #f.getnchannels(),
# rate = 44100, #f.getframerate(),
# output = True,
# stream_callback = TTS_callback,
# output_device_index = self.output_device)
# soundfile = sfile
# soundfile.setpos(0)
# self.stream.start_stream()
# while self.stream.is_active():
# time.sleep(1.0)
# self.stream.stop_stream()
# self.stream.close()
# self.streaming = False
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='audio_server')
parser.add_argument('-ttsport', type=int, help='TTS server port [default: 9001]', default=9001)
parser.add_argument('-asrport', type=int, help='ASR server port [default: 9002]', default=9002)
parser.add_argument('-device', type=str, help='audio device [default: \'sysdefault\']', default='sysdefault')
args = parser.parse_args()
tts_server = TTSServer(args.ttsport,args.device)
asr_server = ASRServer(args.asrport)
tts_server.start()
time.sleep(1)
asr_server.start()
run = True
while (run):
try:
time.sleep(3)
#if (not tts_server.streaming):
# cmd = 'play -n --no-show-progress -r 44100 -c1 synth 0.1 sine 50 vol 0.01' # keep sound alive
# os.system(cmd)
except KeyboardInterrupt:
print "Exit"
run = False
tts_server.stop()
asr_server.stop()
sys.exit(0)
| 30.93 | 116 | 0.516246 |
0a7ce8d41f3884cc3735bd20c347dfb81bcc70b3 | 2,714 | py | Python
max_stars: torchvision/datasets/kinetics.py | sh1doy/vision | d7dce1034a0682bf8832bc89cda9589d6598087d | ["BSD-3-Clause"] | null | null | null
max_issues: torchvision/datasets/kinetics.py | sh1doy/vision | d7dce1034a0682bf8832bc89cda9589d6598087d | ["BSD-3-Clause"] | null | null | null
max_forks: torchvision/datasets/kinetics.py | sh1doy/vision | d7dce1034a0682bf8832bc89cda9589d6598087d | ["BSD-3-Clause"] | null | null | null
from .video_utils import VideoClips
from .utils import list_dir
from .folder import make_dataset
from .vision import VisionDataset
| 39.333333 | 97 | 0.669123 |
0a7e85c92ceca48b141eaf4f09d1496be103b6aa | 10,795 | py | Python
max_stars: venv/lib/python2.7/site-packages/sphinx/builders/qthelp.py | CharleyFarley/ovvio | 81489ee64f91e4aab908731ce6ddf59edb9314bf | ["MIT"] | null | null | null
max_issues: venv/lib/python2.7/site-packages/sphinx/builders/qthelp.py | CharleyFarley/ovvio | 81489ee64f91e4aab908731ce6ddf59edb9314bf | ["MIT"] | null | null | null
max_forks: venv/lib/python2.7/site-packages/sphinx/builders/qthelp.py | CharleyFarley/ovvio | 81489ee64f91e4aab908731ce6ddf59edb9314bf | ["MIT"] | 1 | 2016-08-24T01:08:34.000Z | 2016-08-24T01:08:34.000Z
# -*- coding: utf-8 -*-
"""
sphinx.builders.qthelp
~~~~~~~~~~~~~~~~~~~~~~
Build input files for the Qt collection generator.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import codecs
import posixpath
from os import path
from six import text_type
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import force_decode
from sphinx.util.pycompat import htmlescape
_idpattern = re.compile(
r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')
# Qt Help Collection Project (.qhcp).
# Is the input file for the help collection generator.
# It contains references to compressed help files which should be
# included in the collection.
# It may contain various other information for customizing Qt Assistant.
collection_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QHelpCollectionProject version="1.0">
<assistant>
<title>%(title)s</title>
<homePage>%(homepage)s</homePage>
<startPage>%(startpage)s</startPage>
</assistant>
<docFiles>
<generate>
<file>
<input>%(outname)s.qhp</input>
<output>%(outname)s.qch</output>
</file>
</generate>
<register>
<file>%(outname)s.qch</file>
</register>
</docFiles>
</QHelpCollectionProject>
'''
# Qt Help Project (.qhp)
# This is the input file for the help generator.
# It contains the table of contents, indices and references to the
# actual documentation files (*.html).
# In addition it defines a unique namespace for the documentation.
project_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QtHelpProject version="1.0">
<namespace>%(namespace)s</namespace>
<virtualFolder>doc</virtualFolder>
<customFilter name="%(project)s %(version)s">
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
</customFilter>
<filterSection>
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
<toc>
<section title="%(title)s" ref="%(masterdoc)s.html">
%(sections)s
</section>
</toc>
<keywords>
%(keywords)s
</keywords>
<files>
%(files)s
</files>
</filterSection>
</QtHelpProject>
'''
section_template = '<section title="%(title)s" ref="%(ref)s"/>'
file_template = ' '*12 + '<file>%(filename)s</file>'
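# Example sketch: filling the two small templates above with %-formatting
# (the title and file name are invented for illustration):
#   section_template % {'title': 'Intro', 'ref': 'intro.html'}
#   -> '<section title="Intro" ref="intro.html"/>'
#   file_template % {'filename': 'intro.html'}
#   -> '            <file>intro.html</file>'   (12-space indent)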
| 36.103679 | 80 | 0.568133 |
0a7edac2ed561ec67cb5f3e276d02750502435c8 | 7,252 | py | Python
max_stars: scripts/scrape_sciencedirect_urls.py | UWPRG/BETO2020 | 55b5b329395da79047e9083232101d15af9f2c49 | ["MIT"] | 4 | 2020-03-04T21:08:11.000Z | 2020-10-28T11:28:00.000Z
max_issues: scripts/scrape_sciencedirect_urls.py | UWPRG/BETO2020 | 55b5b329395da79047e9083232101d15af9f2c49 | ["MIT"] | null | null | null
max_forks: scripts/scrape_sciencedirect_urls.py | UWPRG/BETO2020 | 55b5b329395da79047e9083232101d15af9f2c49 | ["MIT"] | 6 | 2019-04-15T16:51:16.000Z | 2019-11-13T02:45:53.000Z
"""
This code is used to scrape ScienceDirect of publication urls and write them to
a text file in the current directory for later use.
"""
import selenium
from selenium import webdriver
import numpy as np
import pandas as pd
import bs4
from bs4 import BeautifulSoup
import time
from sklearn.utils import shuffle
def scrape_page(driver):
"""
This method finds all the publication result web elements on the webpage.
Parameters
----------
driver (Selenium webdriver object) : Instance of the webdriver class e.g.
webdriver.Chrome()
Returns
-------
elems (list) : A list of all scraped hrefs from the page
"""
elems = driver.find_elements_by_class_name('ResultItem')
return elems
def clean(elems):
"""
This method takes a list of scraped selenium web elements
and filters and returns only the hrefs leading to publications.
Filtering includes removing all urls with keywords that are indicative of
non-html links.
Parameters
----------
elems (list) : The list of hrefs to be filtered
Returns
-------
urls (list) : The filtered list of hrefs, which should match the list
displayed in the ScienceDirect GUI
titles (list) : The corresponding publication titles
"""
titles = []
urls = []
for elem in elems:
href_child = elem.find_element_by_css_selector('a[href]')
url = href_child.get_attribute('href')
title = href_child.text
titles.append(title)
urls.append(url)
return urls, titles
def build_url_list(gui_prefix,search_terms,journal_list):
"""
This method takes the list of journals and creates a triple-nested dictionary
containing all accessible urls to each page, in each year, for each journal,
for a given search on sciencedirect.
"""
dict1 = {}
years = np.arange(1995,2020)
for journal in journal_list:
dict2 = {}
for year in years:
dict3 = {}
for i in range(60):
url = gui_prefix + search_terms + '&show=100'+ '&articleTypes=FLA%2CREV' + '&years='+ str(year)
if i != 0:
url = url + '&offset=' + str(i) +'00'
url = url + '&pub=' + journal
dict3[i] = url
dict2[year] = dict3
dict1[journal] = dict2
return dict1
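# Example sketch of the structure returned above (the journal name 'Polymer'
# is invented; the first page of a year carries no offset parameter):
# url_dict['Polymer'][1995][0]
# -> 'https://www.sciencedirect.com/search/advanced?qs=chemistry%20OR%20'
#    'molecule%20OR%20polymer%20OR%20organic&show=100'
#    '&articleTypes=FLA%2CREV&years=1995&pub=Polymer'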
def proxify(scraped_urls,uw_prefix):
"""
This method takes a list of scraped urls and turns them into urls that
go through the UW Library proxy so that all of them are full access.
Parameters
----------
scraped_urls (list) : The list of URLs to be converted
uw_prefix (str) : The string that all URLs which go through the UW Library
Proxy start with.
Returns
-------
proxy_urls (list) : The list of converted URLs which go through UW Library
proxy
"""
proxy_urls = []
for url in scraped_urls:
sd_id = url[-17:]
newlink = uw_prefix + sd_id
if sd_id.startswith('S'):
proxy_urls.append(newlink)
return proxy_urls
def write_urls(urls,titles,file,journal,year):
"""
This method takes a list of urls and writes them to a desired text file.
Parameters
----------
urls (list) : The list of URLs to be saved.
file (file object) : The opened .txt file which will be written to.
year (str or int) : The year associated with the publication date.
Returns
-------
Does not return anything
"""
for link,title in zip(urls,titles):
line = link + ',' + title + ',' + journal + ',' + str(year)
file.write(line)
file.write('\n')
def find_pubTitle(driver,journal):
"""
This method finds the identifying number for a specific journal. This
identifying number is added to the GUI query URL to ensure only publications
from the desired journal are being found.
"""
pub_elems = driver.find_elements_by_css_selector('input[id*=publicationTitles]')
pub_names = []
for elem in pub_elems:
pub_name = elem.get_attribute("name")
if pub_name == journal:
return elem.get_attribute('id')[-6:] #returns the identifying number
#for that journal
df = pd.read_excel('elsevier_journals.xls')
df.Full_Category = df.Full_Category.str.lower() # lowercase topics for searching
df = df.drop_duplicates(subset = 'Journal_Title') # drop any duplicate journals
df = shuffle(df,random_state = 42)
# The set of default strings that will be used to sort which journals we want
journal_strings = ['chemistry','energy','molecular','atomic','chemical','biochem'
,'organic','polymer','chemical engineering','biotech','coloid']
name = df.Full_Category.str.contains # making this an easier command to type
# new dataframe full of only journals who's topic description contained the
# desired keywords
df2 = df[name('polymer') | name('chemistry') | name('energy')
| name('molecular') | name('colloid') | name('biochem')
| name('organic') | name('biotech') | name('chemical')]
journal_list = df2.Journal_Title # Series of only the journals to be searched
gui_prefix = 'https://www.sciencedirect.com/search/advanced?qs='
search_terms = 'chemistry%20OR%20molecule%20OR%20polymer%20OR%20organic'
url_dict = build_url_list(gui_prefix,search_terms,journal_list)
driver = webdriver.Chrome()
uw_prefix = 'https://www-sciencedirect-com.offcampus.lib.washington.edu/science/article/pii/'
filename = input("Input filename with .txt extension for URL storage: ")
url_counter = 0
master_list = []
file = open(filename,'a+')
for journal in journal_list:
for year in np.arange(1995,2020):
for offset in np.arange(60):
page = url_dict[journal][year][offset]
print("journal, year, offset = ",journal,year,offset)
driver.get(page)
time.sleep(2) # need sleep to load the page properly
if offset == 0: # if on page 1, we need to grab the publisher number
try: # we may be at a page which won't have the item we are looking for
pubTitles = find_pubTitle(driver, journal)
for y in url_dict[journal]: # update every url for this journal in place
    for o in url_dict[journal][y]:
        url_dict[journal][y][o] += '&pubTitles=' + pubTitles
driver.get(url_dict[journal][year][0]) # reload the first page with the new url
except:
pass # if there is an exception, it means we are on the right page
scraped_elems = scrape_page(driver) # scrape the page
scraped_urls, titles = clean(scraped_elems)
proxy_urls = proxify(scraped_urls,uw_prefix) # not even sure this is needed
write_urls(proxy_urls,titles,file,journal,year)
url_counter += len(proxy_urls)
print('Total URLs saved is: ',url_counter)
if len(scraped_elems) < 100: # after content is saved, go to the next year
break # because we know this is the last page of urls for this year
file.close()
driver.quit()
| 33.730233 | 125 | 0.628792 |
0a7ef598ad33e1712e909b5218a858b7b8de970f | 1,903 | py | Python
max_stars: superset/typing.py | GodelTech/superset | da170aa57e94053cf715f7b41b09901c813a149a | ["Apache-2.0"] | 7 | 2020-07-31T04:50:01.000Z | 2021-12-08T07:56:42.000Z
max_issues: superset/typing.py | GodelTech/superset | da170aa57e94053cf715f7b41b09901c813a149a | ["Apache-2.0"] | 77 | 2020-02-02T07:54:13.000Z | 2022-03-23T18:22:04.000Z
max_forks: superset/typing.py | GodelTech/superset | da170aa57e94053cf715f7b41b09901c813a149a | ["Apache-2.0"] | 6 | 2020-03-25T01:02:29.000Z | 2021-05-12T17:11:19.000Z
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from flask import Flask
from flask_caching import Cache
from werkzeug.wrappers import Response
CacheConfig = Union[Callable[[Flask], Cache], Dict[str, Any]]
DbapiDescriptionRow = Tuple[
str, str, Optional[str], Optional[str], Optional[int], Optional[int], bool
]
DbapiDescription = Union[List[DbapiDescriptionRow], Tuple[DbapiDescriptionRow, ...]]
DbapiResult = Sequence[Union[List[Any], Tuple[Any, ...]]]
FilterValue = Union[datetime, float, int, str]
FilterValues = Union[FilterValue, List[FilterValue], Tuple[FilterValue]]
FormData = Dict[str, Any]
Granularity = Union[str, Dict[str, Union[str, float]]]
AdhocMetric = Dict[str, Any]
Metric = Union[AdhocMetric, str]
OrderBy = Tuple[Metric, bool]
QueryObjectDict = Dict[str, Any]
VizData = Optional[Union[List[Any], Dict[Any, Any]]]
VizPayload = Dict[str, Any]
# Flask response.
Base = Union[bytes, str]
Status = Union[int, str]
Headers = Dict[str, Any]
FlaskResponse = Union[
Response, Base, Tuple[Base, Status], Tuple[Base, Status, Headers],
]
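# Example sketch: one DbapiDescriptionRow as typed above follows the DB-API
# cursor.description layout (the values are invented for illustration):
#   (name, type_code, display_size, internal_size, precision, scale, null_ok)
#   ("id", "INTEGER", None, None, 10, 0, False)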
| 39.645833 | 84 | 0.755123 |
0a7f17dbb71fa4b55ccb4daea833fc07286f055d | 2,042 | py | Python
max_stars: log_system_information.py | ibaiGorordo/depthai | 57b437f38ebe80e870ee4852ca7ccc80eaaa76cc | ["MIT"] | 476 | 2020-04-21T11:38:55.000Z | 2022-03-29T02:59:34.000Z
max_issues: log_system_information.py | ibaiGorordo/depthai | 57b437f38ebe80e870ee4852ca7ccc80eaaa76cc | ["MIT"] | 440 | 2020-04-15T19:15:01.000Z | 2022-03-31T21:17:33.000Z
max_forks: log_system_information.py | ibaiGorordo/depthai | 57b437f38ebe80e870ee4852ca7ccc80eaaa76cc | ["MIT"] | 124 | 2020-04-23T19:23:25.000Z | 2022-03-30T19:12:36.000Z
#!/usr/bin/env python3
import json
import platform
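# NOTE: make_sys_report() is missing from this truncated row. A minimal
# hypothetical stand-in using only the modules imported above could be:
def make_sys_report():
    return {
        "platform": platform.platform(),
        "python_version": platform.python_version(),
        "machine": platform.machine(),
    }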
if __name__ == "__main__":
data = make_sys_report()
with open("log_system_information.json", "w") as f:
json.dump(data, f, indent=4)
print(json.dumps(data, indent=4))
print("System info gathered successfully - saved as \"log_system_information.json\"")
| 34.610169 | 89 | 0.589128 |
0a7f1dd168a64e7f7f19d3324731c892ec275922 | 1,845 | py | Python
max_stars: patch.py | silverhikari/romtools | 2a09290fef85f35502a95c5c2874317029f0439c | ["Apache-2.0"] | 5 | 2018-02-02T06:36:56.000Z | 2020-12-21T20:17:20.000Z
max_issues: patch.py | silverhikari/romtools | 2a09290fef85f35502a95c5c2874317029f0439c | ["Apache-2.0"] | 8 | 2017-10-10T17:50:47.000Z | 2021-06-02T00:02:58.000Z
max_forks: patch.py | silverhikari/romtools | 2a09290fef85f35502a95c5c2874317029f0439c | ["Apache-2.0"] | 2 | 2017-10-10T20:15:24.000Z | 2021-12-17T04:50:16.000Z
"""
Utils for creating xdelta patches.
"""
import logging
from subprocess import check_output, CalledProcessError
from shutil import copyfile
from os import remove, path
| 27.132353 | 78 | 0.564228 |
0a7fa4ecf8696f5df75632b66c0092d937a89bf0 | 2,346 | py | Python
max_stars: enigma.py | fewieden/Enigma-Machine | 0c130d3cf1bb5146d438cc39dca55ebbcb0f1cdf | ["MIT"] | 1 | 2018-10-29T10:46:10.000Z | 2018-10-29T10:46:10.000Z
max_issues: enigma.py | fewieden/Enigma-Machine | 0c130d3cf1bb5146d438cc39dca55ebbcb0f1cdf | ["MIT"] | null | null | null
max_forks: enigma.py | fewieden/Enigma-Machine | 0c130d3cf1bb5146d438cc39dca55ebbcb0f1cdf | ["MIT"] | 1 | 2021-09-05T16:18:25.000Z | 2021-09-05T16:18:25.000Z
from rotor import Rotor
import sys
import getopt
if __name__ == '__main__':
main(sys.argv[1:])
| 29.696203 | 110 | 0.498721 |
0a8049edeef1e3bee26e482ae16b802069251b6f | 6,780 | py | Python
max_stars: andersoncd/group.py | idc9/andersoncd | af2123b241e5f82f7c51b2bbf5196fb02723b582 | ["BSD-3-Clause"] | null | null | null
max_issues: andersoncd/group.py | idc9/andersoncd | af2123b241e5f82f7c51b2bbf5196fb02723b582 | ["BSD-3-Clause"] | null | null | null
max_forks: andersoncd/group.py | idc9/andersoncd | af2123b241e5f82f7c51b2bbf5196fb02723b582 | ["BSD-3-Clause"] | null | null | null
import time
import numpy as np
from scipy import sparse
from numba import njit
from numpy.linalg import norm
from scipy.sparse.linalg import svds
from andersoncd.lasso import dual_lasso
def solver_group(
X, y, alpha, grp_size, max_iter=10000, tol=1e-4, f_gap=10, K=5,
use_acc=False, algo='bcd', compute_time=False, tmax=np.infty,
verbose=True):
"""Solve the GroupLasso with BCD/ISTA/FISTA, eventually with extrapolation.
Groups are contiguous, of size grp_size.
Objective:
norm(y - Xw, ord=2)**2 / 2 + alpha * sum_g ||w_{[g]}||_2
TODO: fill in docstring
Parameters:
algo: string
'bcd', 'pgd', 'fista'
compute_time : bool, default=False
If you want to compute timings or not
tmax : float, default=np.infty
Maximum time (in seconds) the algorithm is allowed to run
alpha: strength of the group penalty
"""
is_sparse = sparse.issparse(X)
n_features = X.shape[1]
if n_features % grp_size != 0:
raise ValueError("n_features is not a multiple of group size")
n_groups = n_features // grp_size
_range = np.arange(n_groups)
groups = dict(
bcd=lambda: _range,
bcdshuf=lambda: np.random.choice(n_groups, n_groups, replace=False),
rbcd=lambda: np.random.choice(n_groups, n_groups, replace=True))
if not is_sparse and not np.isfortran(X):
X = np.asfortranarray(X)
last_K_w = np.zeros([K + 1, n_features])
U = np.zeros([K, n_features])
if algo in ('pgd', 'fista'):
if is_sparse:
L = svds(X, k=1)[1][0] ** 2
else:
L = norm(X, ord=2) ** 2
lc = np.zeros(n_groups)
for g in range(n_groups):
X_g = X[:, g * grp_size: (g + 1) * grp_size]
if is_sparse:
gram = (X_g.T @ X_g).todense()
lc[g] = norm(gram, ord=2)
else:
lc[g] = norm(X_g, ord=2) ** 2
w = np.zeros(n_features)
if algo == 'fista':
z = np.zeros(n_features)
t_new = 1
R = y.copy()
E = []
gaps = np.zeros(max_iter // f_gap)
if compute_time:
times = []
t_start = time.time()
for it in range(max_iter):
if it % f_gap == 0:
if algo == 'fista':
R = y - X @ w
p_obj = primal_grp(R, w, alpha, grp_size)
E.append(p_obj)
theta = R / alpha
if compute_time:
elapsed_times = time.time() - t_start
times.append(elapsed_times)
if verbose:
print("elapsed time: %f " % elapsed_times)
if elapsed_times > tmax:
break
d_norm_theta = np.max(
norm((X.T @ theta).reshape(-1, grp_size), axis=1))
if d_norm_theta > 1.:
theta /= d_norm_theta
d_obj = dual_lasso(y, theta, alpha)
gap = p_obj - d_obj
if verbose:
print("Iteration %d, p_obj::%.5f, d_obj::%.5f, gap::%.2e" %
(it, p_obj, d_obj, gap))
gaps[it // f_gap] = gap
if gap < tol:
print("Early exit")
break
if algo.endswith('bcd'):
if is_sparse:
_bcd_sparse(
X.data, X.indices, X.indptr, w, R, alpha, lc)
else:
_bcd(X, w, R, alpha, lc, groups[algo]())
elif algo == 'pgd':
w[:] = BST_vec(w + X.T @ R / L, alpha / L, grp_size)
R[:] = y - X @ w
elif algo == 'fista':
w_old = w.copy()
w[:] = BST_vec(z - X.T @ (X @ z - y) / L, alpha / L, grp_size)
t_old = t_new
t_new = (1. + np.sqrt(1 + 4 * t_old ** 2)) / 2.
z[:] = w + (t_old - 1.) / t_new * (w - w_old)
else:
raise ValueError("Unknown algo %s" % algo)
if use_acc:
if it < K + 1:
last_K_w[it] = w
else:
for k in range(K):
last_K_w[k] = last_K_w[k + 1]
last_K_w[K - 1] = w
for k in range(K):
U[k] = last_K_w[k + 1] - last_K_w[k]
C = np.dot(U, U.T)
try:
z = np.linalg.solve(C, np.ones(K))
c = z / z.sum()
w_acc = np.sum(last_K_w[:-1] * c[:, None],
axis=0)
p_obj = primal_grp(R, w, alpha, grp_size)
R_acc = y - X @ w_acc
p_obj_acc = primal_grp(R_acc, w_acc, alpha, grp_size)
if p_obj_acc < p_obj:
w = w_acc
R = R_acc
except np.linalg.LinAlgError:
if verbose:
print("----------Linalg error")
if compute_time:
return w, np.array(E), gaps[:it // f_gap + 1], times
return w, np.array(E), gaps[:it // f_gap + 1]
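# NOTE: BST_vec() and primal_grp() are called above but fall outside this
# truncated excerpt (as do the numba kernels _bcd/_bcd_sparse, which are not
# reconstructed here). The following are standard definitions consistent with
# the stated objective, not necessarily the original code: block
# soft-thresholding (the prox of the group penalty) and the primal value.
def BST_vec(x, tau, grp_size):
    w = x.reshape(-1, grp_size)
    norms = np.maximum(norm(w, axis=1), np.finfo(float).eps)
    scaling = np.maximum(0., 1. - tau / norms)
    return (w * scaling[:, None]).ravel()

def primal_grp(R, w, alpha, grp_size):
    return (0.5 * norm(R) ** 2
            + alpha * norm(w.reshape(-1, grp_size), axis=1).sum())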
| 31.100917 | 79 | 0.488791 |
0a804f8203f3e605d7c6651f77fee25137c52bc6 | 6,259 | py | Python
max_stars: textattack/search_methods/greedy_word_swap_wir.py | dheerajrav/TextAttack | 41e747215bb0f01c511af95b16b94704c780cd5a | ["MIT"] | null | null | null
max_issues: textattack/search_methods/greedy_word_swap_wir.py | dheerajrav/TextAttack | 41e747215bb0f01c511af95b16b94704c780cd5a | ["MIT"] | null | null | null
max_forks: textattack/search_methods/greedy_word_swap_wir.py | dheerajrav/TextAttack | 41e747215bb0f01c511af95b16b94704c780cd5a | ["MIT"] | null | null | null
"""
Greedy Word Swap with Word Importance Ranking
===================================================
When WIR method is set to ``unk``, this is a reimplementation of the search
method from the paper: Is BERT Really Robust?
A Strong Baseline for Natural Language Attack on Text Classification and
Entailment by Jin et. al, 2019. See https://arxiv.org/abs/1907.11932 and
https://github.com/jind11/TextFooler.
"""
import numpy as np
import torch
from torch.nn.functional import softmax
from textattack.goal_function_results import GoalFunctionResultStatus
from textattack.search_methods import SearchMethod
from textattack.shared.validators import (
transformation_consists_of_word_swaps_and_deletions,
)
| 41.450331 | 87 | 0.615594 |
0a81693f8777fdc22a6d886900c28851626fa805 | 377 | py | Python
max_stars: lemur/deployment/service.py | rajatsharma94/lemur | 99f46c1addcd40154835e151d0b189e1578805bb | ["Apache-2.0"] | 1,656 | 2015-09-20T03:12:28.000Z | 2022-03-29T18:00:54.000Z
max_issues: lemur/deployment/service.py | rajatsharma94/lemur | 99f46c1addcd40154835e151d0b189e1578805bb | ["Apache-2.0"] | 3,017 | 2015-09-18T23:15:24.000Z | 2022-03-30T22:40:02.000Z
max_forks: lemur/deployment/service.py | rajatsharma94/lemur | 99f46c1addcd40154835e151d0b189e1578805bb | ["Apache-2.0"] | 401 | 2015-09-18T23:02:18.000Z | 2022-02-20T16:13:14.000Z
from lemur import database
def rotate_certificate(endpoint, new_cert):
"""
Rotates a certificate on a given endpoint.
:param endpoint:
:param new_cert:
:return:
"""
# ensure that certificate is available for rotation
endpoint.source.plugin.update_endpoint(endpoint, new_cert)
endpoint.certificate = new_cert
database.update(endpoint)
| 23.5625 | 62 | 0.71618 |
0a819052de1a3c3f8fa2090ece2179f25885147a | 767 | py | Python
max_stars: pype/celery.py | h2020-westlife-eu/VRE | a85d5370767939b1971415be48a551ae6b1edc5d | ["MIT"] | 1 | 2016-06-28T13:13:27.000Z | 2016-06-28T13:13:27.000Z
max_issues: pype/celery.py | h2020-westlife-eu/VRE | a85d5370767939b1971415be48a551ae6b1edc5d | ["MIT"] | 12 | 2016-06-28T11:19:46.000Z | 2017-05-05T14:24:14.000Z
max_forks: pype/celery.py | h2020-westlife-eu/VRE | a85d5370767939b1971415be48a551ae6b1edc5d | ["MIT"] | null | null | null
# coding: utf-8
# Copyright Luna Technology 2015
# Matthieu Riviere <[email protected]>
from __future__ import absolute_import
import os
from celery import Celery
# Set the default Django settings module for the 'celery' program
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pype.settings')
from django.conf import settings
from celery.signals import setup_logging
app = Celery('pype')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| 23.96875 | 65 | 0.783572 |
0a82635dab6776cd0ffd24a37efd7bf2386d6303 | 1,827 | py | Python
max_stars: train/general_train_example/1_parse.py | ss433s/sosweety | 4cb1a0f061f26e509ee51c0fabd0284ad15804a5 | ["MIT"] | null | null | null
max_issues: train/general_train_example/1_parse.py | ss433s/sosweety | 4cb1a0f061f26e509ee51c0fabd0284ad15804a5 | ["MIT"] | null | null | null
max_forks: train/general_train_example/1_parse.py | ss433s/sosweety | 4cb1a0f061f26e509ee51c0fabd0284ad15804a5 | ["MIT"] | null | null | null
import os, sys
import json
# anchorroot
this_file_path = os.path.split(os.path.realpath(__file__))[0]
this_path = this_file_path
root_path = this_file_path
while this_path:
if os.path.exists(os.path.join(this_path, 'sosweety_root_anchor.py')):
root_path = this_path
break
par_path = os.path.dirname(this_path)
# print(par_path)
if par_path == this_path:
break
else:
this_path = par_path
sys.path.append(root_path)
from modules.sParser.sParser import sParser
from modules.knowledgebase.kb import KnowledgeBase
train_dir = 'data/train_zh_wiki'
train_dir = os.path.join(root_path, train_dir)
if not os.path.exists(train_dir):
os.makedirs(train_dir)
# parse result file
parse_result_dir = 'parse_result'
parse_result_dir = os.path.join(train_dir, parse_result_dir)
if not os.path.exists(parse_result_dir):
os.makedirs(parse_result_dir)
pos_tags_file_name = 'pos_tags_file'
pos_tags_file_path = os.path.join(parse_result_dir, pos_tags_file_name)
KB = KnowledgeBase()
parser = sParser(KB)
with open(pos_tags_file_path, 'w') as pos_tags_file:
#
file_path = 'data/corpus/zh_wiki/wiki_test'
file_path = os.path.join(root_path, file_path)
file = open(file_path)
line = file.readline()
count = 0
while line:
count += 1
if count % 5000 == 0:
print('parsed %s sentence' % count)
text = line.strip()
try:
ss_pos_tags = parser.text2ss_pos_tags(text)
for pos_tags in ss_pos_tags:
pos_tags_file.write(json.dumps(pos_tags, ensure_ascii=False) + '\n')
except Exception:
print('line %s decode error' % count)
line = file.readline()
file.close()
| 29.467742 | 85 | 0.667761 |
0a82a3ff8df3d7d05c880b80e09b0d2ae4679de0 | 16,834 | py | Python
max_stars: ruleex/hypinv/model.py | rohancode/ruleex_modified | ec974e7811fafc0c06d4d2c53b4e2898dd6b7305 | ["Apache-2.0"] | 18 | 2019-09-19T09:50:52.000Z | 2022-03-20T13:59:20.000Z
max_issues: ruleex/hypinv/model.py | rohancode/ruleex_modified | ec974e7811fafc0c06d4d2c53b4e2898dd6b7305 | ["Apache-2.0"] | 3 | 2020-10-31T05:15:32.000Z | 2022-02-10T00:34:05.000Z
max_forks: ruleex/hypinv/model.py | rohancode/ruleex_modified | ec974e7811fafc0c06d4d2c53b4e2898dd6b7305 | ["Apache-2.0"] | 7 | 2020-12-06T20:55:50.000Z | 2021-12-11T18:14:51.000Z
from gtrain import Model
import numpy as np
import tensorflow as tf
#________________________________________EXAMPLES_OF_NetForHypinv_CLASS_____________________________________________
| 45.010695 | 159 | 0.594214 |
0a8392531b265c3630ab7efd862cf9bb543e8116 | 126 | py | Python
max_stars: py_tdlib/constructors/get_chat_member.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | ["MIT"] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z
max_issues: py_tdlib/constructors/get_chat_member.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | ["MIT"] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z
max_forks: py_tdlib/constructors/get_chat_member.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | ["MIT"] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z
from ..factory import Method
| 18 | 32 | 0.690476 |
0a83eff7c2dc361748d280106f49c290cfe4b19f | 6,474 | py | Python
max_stars: src/phrase_manager/phrase_manager.py | Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering | 095b47eb76503d44f54f701d303193328a5a4c86 | ["MIT"] | null | null | null
max_issues: src/phrase_manager/phrase_manager.py | Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering | 095b47eb76503d44f54f701d303193328a5a4c86 | ["MIT"] | 6 | 2020-01-28T23:09:44.000Z | 2022-02-10T01:16:59.000Z
max_forks: src/phrase_manager/phrase_manager.py | Franco7Scala/GeneratingNaturalLanguageAdversarialExamplesThroughParticleFiltering | 095b47eb76503d44f54f701d303193328a5a4c86 | ["MIT"] | null | null | null
import numpy
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from src.support import support
| 40.21118 | 143 | 0.61384 |
0a842caaf70906bbc1cf4b3a4ba3ba9841f2aa9c | 1,172 | py | Python
max_stars: setup.py | fonar/paypalhttp_python | 218136324c3dc6d4021db907c94cb6ac30cb1060 | ["MIT"] | null | null | null
max_issues: setup.py | fonar/paypalhttp_python | 218136324c3dc6d4021db907c94cb6ac30cb1060 | ["MIT"] | null | null | null
max_forks: setup.py | fonar/paypalhttp_python | 218136324c3dc6d4021db907c94cb6ac30cb1060 | ["MIT"] | null | null | null
from setuptools import setup
version = "1.0.0"
long_description = """
PayPalHttp is a generic http client designed to be used with code-generated projects.
"""
setup(
name="paypalhttp",
long_description=long_description,
version=version,
author="PayPal",
packages=["paypalhttp", "paypalhttp/testutils", "paypalhttp/serializers"],
install_requires=['requests>=2.0.0', 'six>=1.0.0', 'pyopenssl>=0.15'],
license="MIT",
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
| 34.470588 | 86 | 0.620307 |
0a8510d776fffba4b52eff1b8a24d1b7d723d4dd | 1,836 | py | Python
max_stars: ooobuild/csslo/xml/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null
max_issues: ooobuild/csslo/xml/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null
max_forks: ooobuild/csslo/xml/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | ["Apache-2.0"] | null | null | null
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...lo.xml.attribute import Attribute as Attribute
from ...lo.xml.attribute_container import AttributeContainer as AttributeContainer
from ...lo.xml.attribute_data import AttributeData as AttributeData
from ...lo.xml.export_filter import ExportFilter as ExportFilter
from ...lo.xml.fast_attribute import FastAttribute as FastAttribute
from ...lo.xml.import_filter import ImportFilter as ImportFilter
from ...lo.xml.namespace_container import NamespaceContainer as NamespaceContainer
from ...lo.xml.para_user_defined_attributes_supplier import ParaUserDefinedAttributesSupplier as ParaUserDefinedAttributesSupplier
from ...lo.xml.text_user_defined_attributes_supplier import TextUserDefinedAttributesSupplier as TextUserDefinedAttributesSupplier
from ...lo.xml.user_defined_attributes_supplier import UserDefinedAttributesSupplier as UserDefinedAttributesSupplier
from ...lo.xml.x_export_filter import XExportFilter as XExportFilter
from ...lo.xml.x_import_filter import XImportFilter as XImportFilter
from ...lo.xml.x_import_filter2 import XImportFilter2 as XImportFilter2
from ...lo.xml.xml_export_filter import XMLExportFilter as XMLExportFilter
from ...lo.xml.xml_import_filter import XMLImportFilter as XMLImportFilter
| 57.375 | 130 | 0.827887 |
0a854fbf5fe92dd3c9a7f42e69f796c6cc578917 | 333 | py | Python
max_stars: bluebottle/tasks/migrations/0012_merge.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | ["BSD-3-Clause"] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z
max_issues: bluebottle/tasks/migrations/0012_merge.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | ["BSD-3-Clause"] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z
max_forks: bluebottle/tasks/migrations/0012_merge.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | ["BSD-3-Clause"] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-27 15:35
from __future__ import unicode_literals
from django.db import migrations
| 19.588235 | 47 | 0.657658 |
0a85751a815d71753d3e2aaa3ccbd06b815ba219 | 5,200 | py | Python
max_stars: bat_train/evaluate.py | bgotthold-usgs/batdetect | 0d4a70f1cda9f6104f6f785f0d953f802fddf0f1 | ["BSD-Source-Code"] | 59 | 2018-03-05T08:58:59.000Z | 2022-03-19T17:33:14.000Z
max_issues: bat_train/evaluate.py | bgotthold-usgs/batdetect | 0d4a70f1cda9f6104f6f785f0d953f802fddf0f1 | ["BSD-Source-Code"] | 11 | 2018-03-16T21:46:51.000Z | 2021-12-14T16:07:55.000Z
max_forks: bat_train/evaluate.py | bgotthold-usgs/batdetect | 0d4a70f1cda9f6104f6f785f0d953f802fddf0f1 | ["BSD-Source-Code"] | 24 | 2018-03-15T14:48:08.000Z | 2022-01-09T01:12:51.000Z
import numpy as np
from sklearn.metrics import roc_curve, auc
def prec_recall_1d(nms_pos_o, nms_prob_o, gt_pos_o, durations, detection_overlap, win_size, remove_eof=True):
"""
nms_pos, nms_prob, and gt_pos are lists of numpy arrays specifying detection
position, detection probability and GT position.
Each list entry is a different file.
Each entry in nms_pos is an array of length num_entries. For nms_prob and
gt_pos its an array of size (num_entries, 1).
durations is a array of the length of the number of files with each entry
containing that file length in seconds.
detection_overlap determines if a prediction is counted as correct or not.
win_size is used to ignore predictions and ground truth at the end of an
audio file.
returns
precision: fraction of retrieved instances that are relevant.
recall: fraction of relevant instances that are retrieved.
"""
if remove_eof:
# filter out the detections in both ground truth and predictions that are too
# close to the end of the file - dont count them during eval
nms_pos, nms_prob, gt_pos = remove_end_preds(nms_pos_o, nms_prob_o, gt_pos_o, durations, win_size)
else:
nms_pos = nms_pos_o
nms_prob = nms_prob_o
gt_pos = gt_pos_o
# loop through each file
true_pos = [] # correctly predicts the ground truth
false_pos = [] # says there is a detection but isn't
for ii in range(len(nms_pos)):
num_preds = nms_pos[ii].shape[0]
if num_preds > 0: # check to make sure it contains something
num_gt = gt_pos[ii].shape[0]
# for each set of predictions label them as true positive or false positive (i.e. 1-tp)
tp = np.zeros(num_preds)
distance_to_gt = np.abs(gt_pos[ii].ravel()-nms_pos[ii].ravel()[:, np.newaxis])
within_overlap = (distance_to_gt <= detection_overlap)
# remove duplicate detections - assign to valid detection with highest prob
for jj in range(num_gt):
inds = np.where(within_overlap[:, jj])[0] # get the indices of all valid predictions
if inds.shape[0] > 0:
max_prob = np.argmax(nms_prob[ii][inds])
selected_pred = inds[max_prob]
within_overlap[selected_pred, :] = False
tp[selected_pred] = 1 # set as true positives
true_pos.append(tp)
false_pos.append(1 - tp)
# calc precision and recall - sort confidence in descending order
# PASCAL style
conf = np.concatenate(nms_prob)[:, 0]
num_gt = np.concatenate(gt_pos).shape[0]
inds = np.argsort(conf)[::-1]
true_pos_cat = np.concatenate(true_pos)[inds].astype(float)
false_pos_cat = np.concatenate(false_pos)[inds].astype(float) # i.e. 1-true_pos_cat
if (conf == conf[0]).sum() == conf.shape[0]:
# all the probability values are the same therefore we will not sweep
# the curve and instead will return a single value
true_pos_sum = true_pos_cat.sum()
false_pos_sum = false_pos_cat.sum()
recall = np.asarray([true_pos_sum / float(num_gt)])
precision = np.asarray([(true_pos_sum / (false_pos_sum + true_pos_sum))])
elif inds.shape[0] > 0:
# otherwise produce a list of values
true_pos_cum = np.cumsum(true_pos_cat)
false_pos_cum = np.cumsum(false_pos_cat)
recall = true_pos_cum / float(num_gt)
precision = (true_pos_cum / (false_pos_cum + true_pos_cum))
return precision, recall
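# Example sketch: calling prec_recall_1d on a toy single-file input. All
# numbers are invented; remove_eof=False is used because remove_end_preds()
# lies outside this (truncated) excerpt.
if __name__ == '__main__':
    nms_pos = [np.array([1.0, 5.0])]        # detection positions (seconds)
    nms_prob = [np.array([[0.9], [0.4]])]   # detection confidences
    gt_pos = [np.array([[1.05]])]           # ground truth positions (seconds)
    durations = np.array([10.0])            # file lengths (unused here)
    precision, recall = prec_recall_1d(
        nms_pos, nms_prob, gt_pos, durations,
        detection_overlap=0.1, win_size=0.25, remove_eof=False)
    print(precision, recall)  # -> [1.  0.5] [1. 1.]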
| 38.80597 | 109 | 0.649038 |
0a86094f8b6e8a0e12d48278a3971b48591f4ec2 | 27,399 | py | Python
max_stars: azure-mgmt/tests/test_mgmt_network.py | SUSE/azure-sdk-for-python | 324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f | ["MIT"] | 2 | 2020-07-29T14:22:17.000Z | 2020-11-06T18:47:40.000Z
max_issues: azure-mgmt/tests/test_mgmt_network.py | SUSE/azure-sdk-for-python | 324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f | ["MIT"] | 1 | 2016-08-01T07:37:04.000Z | 2016-08-01T07:37:04.000Z
max_forks: azure-mgmt/tests/test_mgmt_network.py | SUSE/azure-sdk-for-python | 324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f | ["MIT"] | 1 | 2020-12-12T21:04:41.000Z | 2020-12-12T21:04:41.000Z
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.network.models
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 33.454212 | 115 | 0.567904 |
0a862e609f431ba255f2003bb9d5372890839f22 | 1,680 | py | Python
max_stars: Networks/Threading/server.py | polbebe/PinkPanther | c6ba47956b2cae6468ac0cfe56229b5434fec754 | ["MIT"] | null | null | null
max_issues: Networks/Threading/server.py | polbebe/PinkPanther | c6ba47956b2cae6468ac0cfe56229b5434fec754 | ["MIT"] | null | null | null
max_forks: Networks/Threading/server.py | polbebe/PinkPanther | c6ba47956b2cae6468ac0cfe56229b5434fec754 | ["MIT"] | null | null | null
import gym
import gym.spaces as spaces
import sys
import socket
from _thread import *
import os
import numpy as np
import pandas as pd
import math as m
import time
import random
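# NOTE: the NetEnv class body is missing from this truncated row; only its
# usage below survives. A hypothetical minimal skeleton consistent with that
# usage (the port number and socket setup are invented placeholders):
class NetEnv:
    def __init__(self, port=8080):
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind(('0.0.0.0', port))
        self.server.listen(1)
        self.t = 0

    def step(self):
        self.t += 1  # placeholder for one control/communication tick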
if __name__ == '__main__':
# Construct MAIN SERVER object
env = NetEnv()
#WALK
for i in range(100000):
env.step()
print('Done')
| 20 | 60 | 0.691667 |
0a86cf6fa8f86673090e299d74c945f26f918502 | 909 | py | Python
max_stars: backend/app/app/db/session.py | zhkuo24/full-stack-fastapi-demo | 25b0d4e5c7fe303b751974748b687e98d4454f48 | ["MIT"] | 7 | 2021-01-06T15:44:58.000Z | 2022-02-12T05:07:10.000Z
max_issues: backend/app/app/db/session.py | zhkuo24/full-stack-fastapi-demo | 25b0d4e5c7fe303b751974748b687e98d4454f48 | ["MIT"] | null | null | null
max_forks: backend/app/app/db/session.py | zhkuo24/full-stack-fastapi-demo | 25b0d4e5c7fe303b751974748b687e98d4454f48 | ["MIT"] | 2 | 2022-03-09T23:20:06.000Z | 2022-03-11T02:48:08.000Z
# -*- coding: utf-8 -*-
# @File : session.py
# @Author : zhkuo
# @Time : 2021/1/3 9:12
# @Desc :
from sqlalchemy import create_engine
# from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from app.core.config import settings
"""
References:
https://www.osgeo.cn/sqlalchemy/orm/session_basics.html
https://landybird.github.io/python/2020/03/02/fastapi%E4%B8%8Easgi(5)/
Session handling with FastAPI: https://github.com/tiangolo/fastapi/issues/726
Options for managing the session:
1. sqlalchemy.orm scoped_session
2. a db session created per request
3. a dependency (see the sketch at the end of this module)
"""
# engine
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, connect_args={"check_same_thread": False})
# scoped_session
# db_session = scoped_session(
# sessionmaker(autocommit=False, autoflush=False, bind=engine)
# )
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
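# Example sketch of option 3 above: a yield-based dependency built on the
# SessionLocal factory defined here (the helper name get_db is an assumption,
# not part of this file).
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()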
| 28.40625 | 99 | 0.766777 |
0a8741dde6ef103d06812289a7da5d5ee4748c1d | 2,427 | py | Python
max_stars: src/tkdialog/dialog.py | KosukeMizuno/tkdialog | 082fc106908bbbfa819d1a129929165f11d4e944 | ["MIT"] | null | null | null
max_issues: src/tkdialog/dialog.py | KosukeMizuno/tkdialog | 082fc106908bbbfa819d1a129929165f11d4e944 | ["MIT"] | null | null | null
max_forks: src/tkdialog/dialog.py | KosukeMizuno/tkdialog | 082fc106908bbbfa819d1a129929165f11d4e944 | ["MIT"] | null | null | null
from pathlib import Path
import pickle
import tkinter as tk
import tkinter.filedialog
def open_dialog(**opt):
"""Parameters
----------
Options will be passed to `tkinter.filedialog.askopenfilename`.
See also tkinter's document.
The following are examples of frequently used options.
- filetypes=[(label, ext), ...]
- label: str
- ext: str, semicolon-separated extensions
- initialdir: str, default Path.cwd()
- multiple: bool, default False
Returns
--------
filename, str
"""
root = tk.Tk()
root.withdraw()
root.wm_attributes("-topmost", True)
opt_default = dict(initialdir=Path.cwd())
_opt = dict(opt_default, **opt)
return tk.filedialog.askopenfilename(**_opt)
def saveas_dialog(**opt):
"""Parameters
----------
Options will be passed to `tkinter.filedialog.asksaveasfilename`.
See also tkinter's document.
The following are examples of frequently used options.
- filetypes=[(label, ext), ...]
- label: str
- ext: str, semicolon-separated extensions
- initialdir: str, default Path.cwd()
- initialfile: str, default isn't set
Returns
--------
filename, str
"""
root = tk.Tk()
root.withdraw()
root.wm_attributes("-topmost", True)
opt_default = dict(initialdir=Path.cwd())
_opt = dict(opt_default, **opt)
return tk.filedialog.asksaveasfilename(**_opt)
def load_pickle_with_dialog(mode='rb', **opt):
"""Load a pickled object with a filename assigned by tkinter's open dialog.
kwargs will be passed to saveas_dialog.
"""
opt_default = dict(filetypes=[('pickled data', '*.pkl'), ('all', '*')])
_opt = dict(opt_default, **opt)
fn = open_dialog(**_opt)
if fn == '': # canceled
return None
with Path(fn).open(mode) as f:
data = pickle.load(f)
return data
def dump_pickle_with_dialog(obj, mode='wb', **opt):
"""Pickle an object with a filename assigned by tkinter's saveas dialog.
kwargs will be passed to saveas_dialog.
Returns
--------
filename: str
"""
opt_default = dict(filetypes=[('pickled data', '*.pkl'), ('all', '*')])
_opt = dict(opt_default, **opt)
fn = saveas_dialog(**_opt)
if fn == '': # canceled
return ''
# note: tkinter
with Path(fn).open(mode) as f:
pickle.dump(obj, f)
return fn
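# Example sketch of a typical round trip (file names are chosen by the user
# in the dialogs at runtime; nothing below is part of the original module):
if __name__ == '__main__':
    data = load_pickle_with_dialog()
    if data is not None:
        dump_pickle_with_dialog(data, initialfile='copy.pkl')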
| 25.547368 | 79 | 0.622167 |
0a880ef41f3bfd67c8ea6c85667d8aef79348500 | 1,744 | py | Python
max_stars: cinder/tests/unit/fake_group_snapshot.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | ["Apache-2.0"] | 571 | 2015-01-01T17:47:26.000Z | 2022-03-23T07:46:36.000Z
max_issues: cinder/tests/unit/fake_group_snapshot.py | vexata/cinder | 7b84c0842b685de7ee012acec40fb4064edde5e9 | ["Apache-2.0"] | 37 | 2015-01-22T23:27:04.000Z | 2021-02-05T16:38:48.000Z
max_forks: cinder/tests/unit/fake_group_snapshot.py | vexata/cinder | 7b84c0842b685de7ee012acec40fb4064edde5e9 | ["Apache-2.0"] | 841 | 2015-01-04T17:17:11.000Z | 2022-03-31T12:06:51.000Z
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from cinder import objects
from cinder.tests.unit import fake_constants as fake
| 33.538462 | 78 | 0.679472 |
0a88b532a2c292c3b22e23456f113d6c77d67696 | 1,258 | py | Python
max_stars: src/tree_visualizer.py | szymanskir/msi | 27013bac31e62b36dff138cfbb91852c96f77ef3 | ["MIT"] | null | null | null
max_issues: src/tree_visualizer.py | szymanskir/msi | 27013bac31e62b36dff138cfbb91852c96f77ef3 | ["MIT"] | null | null | null
max_forks: src/tree_visualizer.py | szymanskir/msi | 27013bac31e62b36dff138cfbb91852c96f77ef3 | ["MIT"] | null | null | null
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
| 33.105263 | 120 | 0.702703 |
0a89d9e3455e77e62d24b044c32fc90cbc464fc1 | 368 | py | Python
max_stars: setup.py | SilicalNZ/canvas | 44d1eee02c334aae6b41aeba01ed0ecdf83aed21 | ["MIT"] | 7 | 2019-08-04T20:37:55.000Z | 2020-03-05T08:36:10.000Z
max_issues: setup.py | SilicalNZ/canvas | 44d1eee02c334aae6b41aeba01ed0ecdf83aed21 | ["MIT"] | 1 | 2019-10-21T05:43:28.000Z | 2019-10-21T05:43:28.000Z
max_forks: setup.py | SilicalNZ/canvas | 44d1eee02c334aae6b41aeba01ed0ecdf83aed21 | ["MIT"] | null | null | null
import setuptools
setuptools.setup(
name = 'sili-canvas',
version = '0.0.1',
license = 'MIT',
url = 'https://github.com/SilicalNZ/canvas',
description = 'A series of easy to use classes to perform complex 2D array transformations',
long_description = '',
author = 'SilicalNZ',
packages = ['canvas', 'canvas.common', 'canvas.tools']
)
| 26.285714 | 96 | 0.649457 |
0a8a41bb9d474871c5ea7be817390ae9d2fe8454 | 49,397 | py | Python
max_stars: tests/viz_tests.py | theoretical-olive/incubator-superset | 72fc581b1559e7ce08b11c481b88eaa01b2d17de | ["Apache-2.0"] | 2 | 2020-06-29T20:02:34.000Z | 2020-06-29T20:02:35.000Z
max_issues: tests/viz_tests.py | theoretical-olive/incubator-superset | 72fc581b1559e7ce08b11c481b88eaa01b2d17de | ["Apache-2.0"] | null | null | null
max_forks: tests/viz_tests.py | theoretical-olive/incubator-superset | 72fc581b1559e7ce08b11c481b88eaa01b2d17de | ["Apache-2.0"] | null | null | null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS
from .base_tests import SupersetTestCase
from .utils import load_fixture
logger = logging.getLogger(__name__)
| 38.381507 | 88 | 0.494099 |
0a8a7f44a0585244eb2c07e0db4cb782cb9fe0fb | 1,840 | py | Python
max_stars: chord_sim/modules/taskqueue.py | ryogrid/FunnelKVS | 65c4308ce6e08b819b5396fc1aa658468c276362 | ["MIT"] | 8 | 2022-01-12T00:46:25.000Z | 2022-03-30T12:00:52.000Z
max_issues: chord_sim/modules/taskqueue.py | ryogrid/FunnelKVS | 65c4308ce6e08b819b5396fc1aa658468c276362 | ["MIT"] | null | null | null
max_forks: chord_sim/modules/taskqueue.py | ryogrid/FunnelKVS | 65c4308ce6e08b819b5396fc1aa658468c276362 | ["MIT"] | 1 | 2022-01-12T06:22:31.000Z | 2022-01-12T06:22:31.000Z
# coding:utf-8
from typing import Dict, List, Optional, cast, TYPE_CHECKING
from .chord_util import ChordUtil, InternalControlFlowException, NodeIsDownedExceptiopn
if TYPE_CHECKING:
from .chord_node import ChordNode
| 41.818182 | 134 | 0.613587 |
0a8b4fc2b42148f674fa2146ee9800ea9e96f927 | 2,614 | py | Python
max_stars: surname_rnn/surname/containers.py | sudarshan85/nlpbook | 41e59d706fb31f5185a0133789639ccffbddb41f | ["Apache-2.0"] | null | null | null
max_issues: surname_rnn/surname/containers.py | sudarshan85/nlpbook | 41e59d706fb31f5185a0133789639ccffbddb41f | ["Apache-2.0"] | null | null | null
max_forks: surname_rnn/surname/containers.py | sudarshan85/nlpbook | 41e59d706fb31f5185a0133789639ccffbddb41f | ["Apache-2.0"] | null | null | null
#!/usr/bin/env python
import pandas as pd
from pathlib import Path
from torch.utils.data import DataLoader
| 33.512821 | 100 | 0.729533 |
0a8d1e23712a4b58170f56ad1d0354b9b57142a5 | 45 | py | Python
max_stars: AudioLib/__init__.py | yNeshy/voice-change | 2535351bcd8a9f2d58fcbff81a2051c4f6ac6ab4 | ["MIT"] | 11 | 2021-02-04T11:35:37.000Z | 2022-03-26T10:32:00.000Z
max_issues: AudioLib/__init__.py | yNeshy/voice-change | 2535351bcd8a9f2d58fcbff81a2051c4f6ac6ab4 | ["MIT"] | 4 | 2021-03-22T09:36:54.000Z | 2021-03-26T09:10:51.000Z
max_forks: AudioLib/__init__.py | yNeshy/voice-change | 2535351bcd8a9f2d58fcbff81a2051c4f6ac6ab4 | ["MIT"] | 6 | 2021-02-24T09:03:35.000Z | 2021-11-16T02:00:53.000Z
from AudioLib.AudioEffect import AudioEffect
| 22.5 | 44 | 0.888889 |
0a8f1638c1d0ec4d963f02a274edb9bb4662cfb2 | 679 | py | Python
max_stars: programs/buck_logging.py | lakshmi2005/buck | 012a59d5d2e5a45b483e85fb190d2b67ea0c56ab | ["Apache-2.0"] | 1 | 2018-02-28T06:26:56.000Z | 2018-02-28T06:26:56.000Z
max_issues: programs/buck_logging.py | lakshmi2005/buck | 012a59d5d2e5a45b483e85fb190d2b67ea0c56ab | ["Apache-2.0"] | 1 | 2018-12-10T15:54:22.000Z | 2018-12-10T19:30:37.000Z
max_forks: programs/buck_logging.py | lakshmi2005/buck | 012a59d5d2e5a45b483e85fb190d2b67ea0c56ab | ["Apache-2.0"] | null | null | null
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
| 27.16 | 78 | 0.624448 |
0a8fdb2b5cc10e441111eda628478417245011ef
| 5,283 |
py
|
Python
|
official/cv/c3d/src/c3d_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/c3d/src/c3d_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/c3d/src/c3d_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
import mindspore.nn as nn
import mindspore.ops as P
from mindspore.common import initializer as init
from src.utils import default_recurisive_init, KaimingNormal
| 40.638462 | 100 | 0.570509 |
0a90c84a059304b0e838dbe80594658dfad7edd3
| 2,119 |
py
|
Python
|
blmath/geometry/apex.py
|
metabolize/blmath
|
8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da
|
[
"BSD-2-Clause"
] | 6 |
2019-09-28T16:48:34.000Z
|
2022-03-25T17:05:46.000Z
|
blmath/geometry/apex.py
|
metabolize/blmath
|
8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da
|
[
"BSD-2-Clause"
] | 6 |
2019-09-09T16:42:02.000Z
|
2021-06-25T15:25:50.000Z
|
blmath/geometry/apex.py
|
metabolize/blmath
|
8ea8d7be60349a60ffeb08a3e34fca20ef9eb0da
|
[
"BSD-2-Clause"
] | 4 |
2017-05-09T16:15:07.000Z
|
2019-02-15T14:15:30.000Z
|
import numpy as np
from blmath.numerics import vx
def apex(points, axis):
'''
Find the most extreme point in the direction of the axis provided.
    axis: A vector, which is a 3x1 np.array.
'''
coords_on_axis = points.dot(axis)
return points[np.argmax(coords_on_axis)]
def inflection_points(points, axis, span):
'''
    Find the list of vertices that precede inflection points in a curve. The curve is differentiated
    with respect to the coordinate system defined by axis and span.
    axis: A vector representing the vertical axis of the coordinate system.
    span: A vector representing the horizontal axis of the coordinate system.
    returns: a list of points in space corresponding to the vertices that
    immediately precede inflection points in the curve
'''
coords_on_span = points.dot(span)
dx = np.gradient(coords_on_span)
coords_on_axis = points.dot(axis)
# Take the second order finite difference of the curve with respect to the
# defined coordinate system
finite_difference_2 = np.gradient(np.gradient(coords_on_axis, dx), dx)
# Compare the product of all neighboring pairs of points in the second derivative
# If a pair of points has a negative product, then the second derivative changes sign
# at one of those points, signalling an inflection point
is_inflection_point = [finite_difference_2[i] * finite_difference_2[i + 1] <= 0 for i in range(len(finite_difference_2) - 1)]
inflection_point_indices = [i for i, b in enumerate(is_inflection_point) if b]
if len(inflection_point_indices) == 0: # pylint: disable=len-as-condition
return []
return points[inflection_point_indices]
def farthest(from_point, to_points):
'''
Find the farthest point among the inputs, to the given point.
Return a tuple: farthest_point, index_of_farthest_point.
'''
absolute_distances = vx.magnitude(to_points - from_point)
index_of_farthest_point = np.argmax(absolute_distances)
farthest_point = to_points[index_of_farthest_point]
return farthest_point, index_of_farthest_point
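

if __name__ == '__main__':
    # Usage sketch (not part of the original module): pick the extreme point
    # along +z and the farthest point from the origin in a random cloud.
    points = np.random.rand(100, 3)
    top = apex(points, np.array([0., 0., 1.]))
    far, far_idx = farthest(np.zeros(3), points)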
| 36.534483 | 129 | 0.738084 |
0a9150e1ffc2d578382971b5ac300e3f70157319
| 637 |
py
|
Python
|
examples/client/main.py
|
TheFarGG/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 3 |
2021-11-06T11:07:18.000Z
|
2022-03-18T09:04:42.000Z
|
examples/client/main.py
|
UnrealFar/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 3 |
2021-11-06T11:22:05.000Z
|
2022-03-12T16:36:52.000Z
|
examples/client/main.py
|
UnrealFar/Discode
|
facf6cd4f82baef2288a23dbe6f2a02dfc2407e2
|
[
"MIT"
] | 4 |
2021-11-06T11:08:26.000Z
|
2022-03-12T14:25:57.000Z
|
import os
import discode
TOKEN = os.environ.get("TOKEN")
# The token from the developer portal.
client = discode.Client(token=TOKEN, intents=discode.Intents.default())
# The ready listener gets fired when the bot/client is completely ready for use.
# The message_create listener is fired whenever a message is sent to any channel that the bot has access to.
| 23.592593 | 108 | 0.726845 |
0a91aa29e60075c1d841a9fa42cfbeabf426976a
| 2,225 |
py
|
Python
|
timm/models/layers/__init__.py
|
kkahatapitiya/pytorch-image-models
|
94f9d54ac22354f3cf7ada9a7304ac97143deb14
|
[
"Apache-2.0"
] | null | null | null |
timm/models/layers/__init__.py
|
kkahatapitiya/pytorch-image-models
|
94f9d54ac22354f3cf7ada9a7304ac97143deb14
|
[
"Apache-2.0"
] | null | null | null |
timm/models/layers/__init__.py
|
kkahatapitiya/pytorch-image-models
|
94f9d54ac22354f3cf7ada9a7304ac97143deb14
|
[
"Apache-2.0"
] | null | null | null |
from .activations import *
from .adaptive_avgmax_pool import \
adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from .blur_pool import BlurPool2d
from .classifier import ClassifierHead, create_classifier
from .cond_conv2d import CondConv2d, get_condconv_initializer
from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
set_layer_config
from .conv2d_same import Conv2dSame, conv2d_same
from .conv_bn_act import ConvBnAct
from .create_act import create_act_layer, get_act_layer, get_act_fn
from .create_attn import get_attn, create_attn
from .create_conv2d import create_conv2d
from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act
from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from .evo_norm import EvoNormBatch2d, EvoNormSample2d
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible
from .inplace_abn import InplaceAbn
from .involution import Involution
from .linear import Linear
from .mixed_conv2d import MixedConv2d
from .mlp import Mlp, GluMlp, GatedMlp, ConvMlpGeneral, ConvMlpGeneralv2
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .norm import GroupNorm, LayerNorm2d
from .norm_act import BatchNormAct2d, GroupNormAct
from .padding import get_padding, get_same_padding, pad_same
from .patch_embed import PatchEmbed
from .pool2d_same import AvgPool2dSame, create_pool2d
from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from .selective_kernel import SelectiveKernel
from .separable_conv import SeparableConv2d, SeparableConvBnAct
from .space_to_depth import SpaceToDepthModule
from .split_attn import SplitAttn
from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from .test_time_pool import TestTimePoolHead, apply_test_time_pool
from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_
| 54.268293 | 105 | 0.865169 |
0a93d3f57e61e9da5895ceeab547d073a015db76
| 468 |
py
|
Python
|
riccipy/metrics/bondi_2.py
|
cjayross/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | 4 |
2019-08-17T04:28:06.000Z
|
2021-01-02T15:19:18.000Z
|
riccipy/metrics/bondi_2.py
|
grdbii/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | 3 |
2019-08-02T04:07:43.000Z
|
2020-06-18T07:49:38.000Z
|
riccipy/metrics/bondi_2.py
|
grdbii/riccipy
|
2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846
|
[
"MIT"
] | null | null | null |
"""
Name: Bondi
References: Bondi, Proc. Roy. Soc. Lond. A, v282, p303, (1964)
Coordinates: Spherical
Symmetry: Spherical
Notes: Outgoing Coordinates
"""
from sympy import Function, diag, sin, symbols
coords = symbols("r v theta phi", real=True)
variables = ()
functions = symbols("C M", cls=Function)
r, v, th, ph = coords
C, M = functions
metric = diag(0, -C(r, v) ** 2 * (1 - 2 * M(r, v) / r), r ** 2, r ** 2 * sin(th) ** 2)
metric[0, 1] = metric[1, 0] = -C(r, v)
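

if __name__ == "__main__":
    # Quick inspection sketch (not part of the original module): the metric
    # is an ordinary sympy Matrix, so its components can be pretty-printed.
    from sympy import pprint
    pprint(metric)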
| 27.529412 | 86 | 0.621795 |
0a94bf94b86f2d0bf5c868fffdac3fa72c685955
| 19,040 |
py
|
Python
|
cfgov/ask_cfpb/tests/test_views.py
|
atuggle/cfgov-refresh
|
5a9cfd92b460b9be7befb39f5845abf56857aeac
|
[
"CC0-1.0"
] | null | null | null |
cfgov/ask_cfpb/tests/test_views.py
|
atuggle/cfgov-refresh
|
5a9cfd92b460b9be7befb39f5845abf56857aeac
|
[
"CC0-1.0"
] | 1 |
2016-09-14T21:11:19.000Z
|
2016-09-14T21:11:19.000Z
|
cfgov/ask_cfpb/tests/test_views.py
|
atuggle/cfgov-refresh
|
5a9cfd92b460b9be7befb39f5845abf56857aeac
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import unicode_literals
import json
from django.apps import apps
from django.core.urlresolvers import NoReverseMatch, reverse
from django.http import Http404, HttpRequest, QueryDict
from django.test import TestCase, override_settings
from django.utils import timezone
from wagtail.wagtailcore.models import Site
from wagtailsharing.models import SharingSite
import mock
from model_mommy import mommy
from ask_cfpb.models import ENGLISH_PARENT_SLUG, SPANISH_PARENT_SLUG
from ask_cfpb.views import annotate_links, ask_search, redirect_ask_search
from v1.util.migrations import get_or_create_page
now = timezone.now()
class AnswerViewTestCase(TestCase):
    # Wrapper restored so the orphaned test method below is valid Python;
    # the class name is a placeholder -- the original class was elided from
    # this record.

    def test_autocomplete_es_blank_term(self):
        result = self.client.get(reverse(
            'ask-autocomplete-es',
            kwargs={'language': 'es'}), {'term': ''})
        output = json.loads(result.content)
        self.assertEqual(output, [])
| 37.628458 | 76 | 0.624475 |
0a950c28a9d44906d9a72986af5603b4ab55c885
| 1,583 |
py
|
Python
|
setup.py
|
bcongdon/instapaper-to-sqlite
|
378b87ffcd2832aeff735dd78a0c8206d220b899
|
[
"MIT"
] | 1 |
2021-10-04T05:48:51.000Z
|
2021-10-04T05:48:51.000Z
|
setup.py
|
bcongdon/instapaper-to-sqlite
|
378b87ffcd2832aeff735dd78a0c8206d220b899
|
[
"MIT"
] | null | null | null |
setup.py
|
bcongdon/instapaper-to-sqlite
|
378b87ffcd2832aeff735dd78a0c8206d220b899
|
[
"MIT"
] | 1 |
2022-02-26T14:12:13.000Z
|
2022-02-26T14:12:13.000Z
|
import os
from setuptools import setup
VERSION = "0.2"


def get_long_description():
    # Helper restored so setup() below is runnable; this is the standard
    # dogsheep-style README loader and may differ from the elided original.
    with open(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
        encoding="utf8",
    ) as fp:
        return fp.read()


setup(
name="instapaper-to-sqlite",
description="Save data from Instapaper to a SQLite database",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Benjamin Congdon",
author_email="[email protected]",
url="https://github.com/bcongdon/instapaper-to-sqlite",
project_urls={
"Source": "https://github.com/bcongdon/instapaper-to-sqlite",
"Issues": "https://github.com/bcongdon/instapaper-to-sqlite/issues",
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Database",
],
keywords="instapaper sqlite export dogsheep",
version=VERSION,
packages=["instapaper_to_sqlite"],
entry_points="""
[console_scripts]
instapaper-to-sqlite=instapaper_to_sqlite.cli:cli
""",
install_requires=[
"click",
"requests",
"sqlite-utils~=3.17",
"pyinstapaper @ git+https://github.com/bcongdon/pyinstapaper#egg=pyinstapaper",
],
extras_require={"test": ["pytest"]},
tests_require=["instapaper-to-sqlite[test]"],
)
| 29.867925 | 87 | 0.632975 |
0a95cfa206f2acf8636e2a3399ef4362d43aa15a
| 3,092 |
py
|
Python
|
pybm/commands/compare.py
|
nicholasjng/pybm
|
13e256ca5c2c8239f9d611b9849dab92f70b2834
|
[
"Apache-2.0"
] | 12 |
2021-10-10T20:00:07.000Z
|
2022-02-09T11:29:07.000Z
|
pybm/commands/compare.py
|
nicholasjng/pybm
|
13e256ca5c2c8239f9d611b9849dab92f70b2834
|
[
"Apache-2.0"
] | 20 |
2021-10-13T09:37:20.000Z
|
2022-03-07T15:14:00.000Z
|
pybm/commands/compare.py
|
nicholasjng/pybm
|
13e256ca5c2c8239f9d611b9849dab92f70b2834
|
[
"Apache-2.0"
] | 1 |
2022-02-09T10:09:41.000Z
|
2022-02-09T10:09:41.000Z
|
from typing import List
from pybm import PybmConfig
from pybm.command import CLICommand
from pybm.config import get_reporter_class
from pybm.exceptions import PybmError
from pybm.reporters import BaseReporter
from pybm.status_codes import ERROR, SUCCESS
from pybm.util.path import get_subdirs
| 32.893617 | 86 | 0.597025 |
0a9605df608e45d997ef3a777c5490c843c12343
| 1,728 |
py
|
Python
|
dddm/recoil_rates/halo.py
|
JoranAngevaare/dddm
|
3461e37984bac4d850beafecc9d1881b84fb226c
|
[
"MIT"
] | null | null | null |
dddm/recoil_rates/halo.py
|
JoranAngevaare/dddm
|
3461e37984bac4d850beafecc9d1881b84fb226c
|
[
"MIT"
] | 85 |
2021-09-20T12:08:53.000Z
|
2022-03-30T12:48:06.000Z
|
dddm/recoil_rates/halo.py
|
JoranAngevaare/dddm
|
3461e37984bac4d850beafecc9d1881b84fb226c
|
[
"MIT"
] | null | null | null |
"""
Get a WIMP recoil rate for a given detector (not taking into account
any detector effects).
"""
import numericalunits as nu
import wimprates as wr
import dddm
export, __all__ = dddm.exporter()
| 33.882353 | 97 | 0.618634 |
0a964781b5354d9a284fd5961c977de9c81e555d
| 17,664 |
py
|
Python
|
picket/rvae/train_eval_models.py
|
rekords-uw/Picket
|
773797ae1c1ed37c345facfb43e289a75d92cc1c
|
[
"MIT"
] | 10 |
2020-11-24T17:26:01.000Z
|
2021-09-26T18:41:44.000Z
|
picket/rvae/train_eval_models.py
|
rekords-uw/Picket
|
773797ae1c1ed37c345facfb43e289a75d92cc1c
|
[
"MIT"
] | null | null | null |
picket/rvae/train_eval_models.py
|
rekords-uw/Picket
|
773797ae1c1ed37c345facfb43e289a75d92cc1c
|
[
"MIT"
] | 3 |
2021-05-26T12:45:37.000Z
|
2021-11-22T04:51:40.000Z
|
#!/usr/bin/env python3
import torch
from torch import optim
import torch.nn.functional as F
import argparse
from sklearn.metrics import mean_squared_error
import numpy as np
import json
from . import utils
from .model_utils import get_pi_exact_vec, rnn_vae_forward_one_stage, rnn_vae_forward_two_stage
| 52.260355 | 194 | 0.614753 |
0a96a8a9570ed3b24a4bfee94944da9262d1bde3
| 449 |
py
|
Python
|
setup.py
|
nopipifish/bert4keras
|
d8fd065b9b74b8a82b381b7183f9934422e4caa9
|
[
"Apache-2.0"
] | 1 |
2020-09-09T02:34:28.000Z
|
2020-09-09T02:34:28.000Z
|
setup.py
|
nopipifish/bert4keras
|
d8fd065b9b74b8a82b381b7183f9934422e4caa9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nopipifish/bert4keras
|
d8fd065b9b74b8a82b381b7183f9934422e4caa9
|
[
"Apache-2.0"
] | null | null | null |
#! -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='bert4keras',
version='0.8.4',
description='an elegant bert4keras',
long_description='bert4keras: https://github.com/bojone/bert4keras',
license='Apache License 2.0',
url='https://github.com/bojone/bert4keras',
author='bojone',
author_email='[email protected]',
install_requires=['keras<=2.3.1'],
packages=find_packages()
)
| 26.411765 | 72 | 0.674833 |
0a96c59de05ef2cf939a78138027073d3aeef532
| 489 |
py
|
Python
|
sztuczna_inteligencja/3-lab/backtrackingSolve.py
|
Magikis/Uniwersity
|
06964ef31d721af85740df1dce3f966006ab9f78
|
[
"MIT"
] | 12 |
2017-11-30T08:45:48.000Z
|
2018-04-26T14:15:45.000Z
|
sztuczna_inteligencja/3-lab/backtrackingSolve.py
|
Magikis/Uniwersity
|
06964ef31d721af85740df1dce3f966006ab9f78
|
[
"MIT"
] | null | null | null |
sztuczna_inteligencja/3-lab/backtrackingSolve.py
|
Magikis/Uniwersity
|
06964ef31d721af85740df1dce3f966006ab9f78
|
[
"MIT"
] | 9 |
2017-10-16T09:42:59.000Z
|
2018-01-27T19:48:45.000Z
|
# import cProfile
# import pstats
# import io
from picture import *
# pr = cProfile.Profile()
# pr.enable()
if __name__ == '__main__':
p = Picture()
p.genPerms()
p.detuctAll()
p.backtrackLoop()
p.saveOtput()
# pr.disable()
# s = io.StringIO()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print(s.getvalue())
| 18.111111 | 56 | 0.586912 |
0a96d8eb608d332f737e6f0d0e18267bfd899873
| 1,512 |
py
|
Python
|
benchmark/generate_examples_strprose.py
|
HALOCORE/SynGuar
|
8f7f9ba52e83091ad3def501169fd60d20b28321
|
[
"MIT"
] | 1 |
2021-06-23T05:10:36.000Z
|
2021-06-23T05:10:36.000Z
|
benchmark/generate_examples_strprose.py
|
HALOCORE/SynGuar
|
8f7f9ba52e83091ad3def501169fd60d20b28321
|
[
"MIT"
] | null | null | null |
benchmark/generate_examples_strprose.py
|
HALOCORE/SynGuar
|
8f7f9ba52e83091ad3def501169fd60d20b28321
|
[
"MIT"
] | null | null | null |
# imports
import os
import json
import subprocess
abs_join = lambda p1, p2 : os.path.abspath(os.path.join(p1, p2))
# constants
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SEED_RELPATH = "./strprose/example_files/_seeds.json"
SEED_FULLPATH = abs_join(SCRIPT_DIR, SEED_RELPATH)
SEED_INFO = None
with open(SEED_FULLPATH, 'r') as f:
SEED_INFO = json.load(f)
TOOL_RELPATH = "../StrPROSE-synthesizer/StrPROSE/bin/Debug/netcoreapp3.1/StrPROSE.dll"
TOOL_FULLPATH = abs_join(SCRIPT_DIR, TOOL_RELPATH)
TARGET_RELDIR = "./strprose/targets"
TARGET_FULLDIR = abs_join(SCRIPT_DIR, TARGET_RELDIR)
MAX_SAMPLE_SIZE = 2000
EXAMPLE_RELDIR = "./strprose/example_files"
EXAMPLE_FULLDIR = abs_join(SCRIPT_DIR, EXAMPLE_RELDIR)
TIME_OUT = 120
# methods
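def generate_examples(bench_id, seed):
    # Reconstruction sketch: the original body was elided from this record.
    # Judging from the constants above, it shells out to the StrPROSE
    # synthesizer to sample up to MAX_SAMPLE_SIZE examples for one
    # benchmark/seed pair. The file layout and CLI arguments below are
    # assumptions, not the verified tool interface.
    target_path = abs_join(TARGET_FULLDIR, "{}.sl".format(bench_id))
    example_path = abs_join(EXAMPLE_FULLDIR, "{}_{}.txt".format(bench_id, seed))
    cmd = [
        "dotnet", TOOL_FULLPATH,
        "--samplegen", target_path,
        str(seed), str(MAX_SAMPLE_SIZE), example_path,
    ]
    print("RUN:", " ".join(cmd))
    subprocess.run(cmd, timeout=TIME_OUT)

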
if __name__ == "__main__":
for bench_id in SEED_INFO["bench_seeds"]:
for seed in SEED_INFO["bench_seeds"][bench_id]:
generate_examples(bench_id, seed)
| 32.170213 | 89 | 0.683862 |
0a96db7bc8255b1b1b651c9085fc3a06e4243461
| 1,753 |
py
|
Python
|
mmtbx/regression/tls/tst_u_tls_vs_u_ens_03.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/regression/tls/tst_u_tls_vs_u_ens_03.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/regression/tls/tst_u_tls_vs_u_ens_03.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
from mmtbx.tls import tools
import math
import time
pdb_str_1 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 3.000 0.000 0.000 1.00 0.00 C
"""
pdb_str_2 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 0.000 3.000 0.000 1.00 0.00 C
"""
pdb_str_3 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 0.000 0.000 3.000 1.00 0.00 C
"""
pdb_str_4 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 1.000 2.000 3.000 1.00 0.00 C
"""
if (__name__ == "__main__"):
t0 = time.time()
exercise_03()
print "Time: %6.4f"%(time.time()-t0)
print "OK"
| 34.372549 | 79 | 0.498003 |
0a96e21fb56076c17506b44887fb9f2f8344e7b0
| 558 |
py
|
Python
|
elliesite/context_processors.py
|
davidkartuzinski/ellieplatformsite
|
63a41cb2a15ae81a7cd3cdf68d783398b3205ce2
|
[
"MIT"
] | 1 |
2021-06-26T22:18:31.000Z
|
2021-06-26T22:18:31.000Z
|
ellie/context_processors.py
|
open-apprentice/ellieplatform-website
|
3018feb05a2a44b916afba3e8e2eb71c18147117
|
[
"MIT"
] | 12 |
2021-06-26T22:38:45.000Z
|
2021-07-07T15:49:43.000Z
|
elliesite/context_processors.py
|
davidkartuzinski/ellieplatformsite
|
63a41cb2a15ae81a7cd3cdf68d783398b3205ce2
|
[
"MIT"
] | 1 |
2021-07-07T15:33:43.000Z
|
2021-07-07T15:33:43.000Z
|
import sys
from django.urls import resolve
| 39.857143 | 107 | 0.693548 |
0a96e48e2da874873ab9a54b25f7428bb39c7d94
| 18,180 |
py
|
Python
|
tools/train_net_step.py
|
va1shn9v/Detectron.pytorch
|
3e1cb11f160148248cbbd79e3dd9f490ca9c280a
|
[
"MIT"
] | null | null | null |
tools/train_net_step.py
|
va1shn9v/Detectron.pytorch
|
3e1cb11f160148248cbbd79e3dd9f490ca9c280a
|
[
"MIT"
] | null | null | null |
tools/train_net_step.py
|
va1shn9v/Detectron.pytorch
|
3e1cb11f160148248cbbd79e3dd9f490ca9c280a
|
[
"MIT"
] | null | null | null |
""" Training script for steps_with_decay policy"""
import argparse
import os
import sys
import pickle
import resource
import traceback
import logging
from collections import defaultdict
import numpy as np
import yaml
import torch
from torch.autograd import Variable
import torch.nn as nn
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader
import _init_paths # pylint: disable=unused-import
import nn as mynn
import utils.net as net_utils
import utils.misc as misc_utils
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from datasets.roidb import combined_roidb_for_training
from roi_data.loader import RoiDataLoader, MinibatchSampler, BatchSampler, collate_minibatch
from modeling.model_builder import Generalized_RCNN
from utils.detectron_weight_helper import load_detectron_weight
from utils.logging import setup_logging
from utils.timer import Timer
from utils.training_stats import TrainingStats
# Set up logging and load config options
logger = setup_logging(__name__)
logging.getLogger('roi_data.loader').setLevel(logging.INFO)
# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='Train a X-RCNN network')
parser.add_argument(
'--dataset', dest='dataset', required=True,
help='Dataset to use')
parser.add_argument(
'--num_classes', dest='num_classes',
help='Number of classes in your custom dataset',
default=None, type=int)
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='Config file for training (and optionally testing)')
parser.add_argument(
'--set', dest='set_cfgs',
        help='Set config keys. Key value sequence separated by whitespace. '
'e.g. [key] [value] [key] [value]',
default=[], nargs='+')
parser.add_argument(
'--disp_interval',
help='Display training info every N iterations',
default=20, type=int)
parser.add_argument(
'--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
# Optimization
    # These options have the highest priority and can overwrite the values in the config file
# or values set by set_cfgs. `None` means do not overwrite.
parser.add_argument(
'--bs', dest='batch_size',
help='Explicitly specify to overwrite the value comed from cfg_file.',
type=int)
parser.add_argument(
'--nw', dest='num_workers',
help='Explicitly specify to overwrite number of workers to load data. Defaults to 4',
type=int)
parser.add_argument(
'--iter_size',
help='Update once every iter_size steps, as in Caffe.',
default=1, type=int)
parser.add_argument(
'--o', dest='optimizer', help='Training optimizer.',
default=None)
parser.add_argument(
'--lr', help='Base learning rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_gamma',
help='Learning rate decay rate.',
default=None, type=float)
# Epoch
parser.add_argument(
'--start_step',
help='Starting step count for training epoch. 0-indexed.',
default=0, type=int)
# Resume training: requires same iterations per epoch
parser.add_argument(
'--resume',
help='resume to training on a checkpoint',
action='store_true')
parser.add_argument(
'--no_save', help='do not save anything', action='store_true')
parser.add_argument(
'--load_ckpt', help='checkpoint path to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--use_tfboard', help='Use tensorflow tensorboard to log training info',
action='store_true')
return parser.parse_args()
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
"""Save checkpoint"""
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(model, mynn.DataParallel):
model = model.module
model_state_dict = model.state_dict()
torch.save({
'step': step,
'train_size': train_size,
'batch_size': args.batch_size,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
def main():
"""Main function"""
args = parse_args()
print('Called with args:')
print(args)
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError("Need Cuda device to run !")
if args.dataset == "custom_dataset" and args.num_classes is None:
raise ValueError("Need number of classes in your custom dataset to run!")
if args.dataset == "coco2017":
cfg.TRAIN.DATASETS = ('coco_2014_train',)
cfg.MODEL.NUM_CLASSES = 4
elif args.dataset == "keypoints_coco2017":
cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 2
elif args.dataset == "voc2007":
cfg.TRAIN.DATASETS = ('voc_2007_train',)
cfg.MODEL.NUM_CLASSES = 21
elif args.dataset == "voc2012":
cfg.TRAIN.DATASETS = ('voc_2012_train',)
cfg.MODEL.NUM_CLASSES = 21
elif args.dataset == "custom_dataset":
cfg.TRAIN.DATASETS = ('custom_data_train',)
cfg.MODEL.NUM_CLASSES = args.num_classes
else:
raise ValueError("Unexpected args.dataset: {}".format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
### Adaptively adjust some configs ###
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
original_num_gpus = cfg.NUM_GPUS
if args.batch_size is None:
args.batch_size = original_batch_size
cfg.NUM_GPUS = torch.cuda.device_count()
assert (args.batch_size % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
effective_batch_size = args.iter_size * args.batch_size
print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH))
### Adjust learning based on batch size change linearly
# For iter_size > 1, gradients are `accumulated`, so lr is scaled based
# on batch_size instead of effective_batch_size
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch_size change:\n'
' BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
### Adjust solver steps
step_scale = original_batch_size / effective_batch_size
old_solver_steps = cfg.SOLVER.STEPS
old_max_iter = cfg.SOLVER.MAX_ITER
cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS))
cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5)
print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
' SOLVER.STEPS: {} --> {}\n'
' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS,
old_max_iter, cfg.SOLVER.MAX_ITER))
# Scale FPN rpn_proposals collect size (post_nms_topN) in `collect` function
# of `collect_and_distribute_fpn_rpn_proposals.py`
#
# post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5)
if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN:
cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch
        print('Scale FPN rpn_proposals collect size directly proportional to the change of IMS_PER_BATCH:\n'
' cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
### Overwrite some solver settings from command line arguments
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
assert_and_infer_cfg()
timers = defaultdict(Timer)
### Dataset ###
timers['roidb'].tic()
roidb, ratio_list, ratio_index = combined_roidb_for_training(
cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
roidb_size = len(roidb)
logger.info('{:d} roidb entries'.format(roidb_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
# Effective training sample size for one epoch
train_size = roidb_size // args.batch_size * args.batch_size
batchSampler = BatchSampler(
sampler=MinibatchSampler(ratio_list, ratio_index),
batch_size=args.batch_size,
drop_last=True
)
dataset = RoiDataLoader(
roidb,
cfg.MODEL.NUM_CLASSES,
training=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_sampler=batchSampler,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch)
dataiterator = iter(dataloader)
### Model ###
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
### Optimizer ###
gn_param_nameset = set()
for name, module in maskRCNN.named_modules():
if isinstance(module, nn.GroupNorm):
gn_param_nameset.add(name+'.weight')
gn_param_nameset.add(name+'.bias')
gn_params = []
gn_param_names = []
bias_params = []
bias_param_names = []
nonbias_params = []
nonbias_param_names = []
nograd_param_names = []
for key, value in maskRCNN.named_parameters():
if value.requires_grad:
if 'bias' in key:
bias_params.append(value)
bias_param_names.append(key)
elif key in gn_param_nameset:
gn_params.append(value)
gn_param_names.append(key)
else:
nonbias_params.append(value)
nonbias_param_names.append(key)
else:
nograd_param_names.append(key)
assert (gn_param_nameset - set(nograd_param_names) - set(bias_param_names)) == set(gn_param_names)
# Learning rate of 0 is a dummy value to be set properly at the start of training
params = [
{'params': nonbias_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
{'params': bias_params,
'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0},
{'params': gn_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}
]
    # names of parameters for each parameter group
param_names = [nonbias_param_names, bias_param_names, gn_param_names]
if cfg.SOLVER.TYPE == "SGD":
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == "Adam":
optimizer = torch.optim.Adam(params)
### Load checkpoint
if args.load_ckpt:
load_name = args.load_ckpt
logging.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(maskRCNN, checkpoint['model'])
if args.resume:
args.start_step = checkpoint['step'] + 1
if 'train_size' in checkpoint: # For backward compatibility
if checkpoint['train_size'] != train_size:
print('train_size value: %d different from the one in checkpoint: %d'
% (train_size, checkpoint['train_size']))
# reorder the params in optimizer checkpoint's params_groups if needed
# misc_utils.ensure_optimizer_ckpt_params_order(param_names, checkpoint)
# There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.
# However it's fixed on master.
optimizer.load_state_dict(checkpoint['optimizer'])
# misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values)
logging.info("loading Detectron weights %s", args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
    lr = optimizer.param_groups[0]['lr']  # lr of non-bias parameters, for command line outputs.
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
minibatch=True)
### Training Setups ###
args.run_name = misc_utils.get_run_name() + '_step'
output_dir = misc_utils.get_output_dir(args, args.run_name)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
# Set the Tensorboard logger
tblogger = SummaryWriter(output_dir)
### Training Loop ###
maskRCNN.train()
CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
# Set index for decay steps
decay_steps_ind = None
for i in range(1, len(cfg.SOLVER.STEPS)):
if cfg.SOLVER.STEPS[i] >= args.start_step:
decay_steps_ind = i
break
if decay_steps_ind is None:
decay_steps_ind = len(cfg.SOLVER.STEPS)
training_stats = TrainingStats(
args,
args.disp_interval,
tblogger if args.use_tfboard and not args.no_save else None)
try:
logger.info('Training starts !')
step = args.start_step
for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
# Warm up
if step < cfg.SOLVER.WARM_UP_ITERS:
method = cfg.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new = cfg.SOLVER.BASE_LR * warmup_factor
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
elif step == cfg.SOLVER.WARM_UP_ITERS:
net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
lr = optimizer.param_groups[0]['lr']
assert lr == cfg.SOLVER.BASE_LR
# Learning rate decay
if decay_steps_ind < len(cfg.SOLVER.STEPS) and \
step == cfg.SOLVER.STEPS[decay_steps_ind]:
logger.info('Decay the learning on step %d', step)
lr_new = lr * cfg.SOLVER.GAMMA
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
decay_steps_ind += 1
training_stats.IterTic()
optimizer.zero_grad()
for inner_iter in range(args.iter_size):
try:
input_data = next(dataiterator)
except StopIteration:
dataiterator = iter(dataloader)
input_data = next(dataiterator)
for key in input_data:
if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
input_data[key] = list(map(Variable, input_data[key]))
try:
net_outputs = maskRCNN(**input_data)
except:
continue
training_stats.UpdateIterStats(net_outputs, inner_iter)
loss = net_outputs['total_loss']
loss.backward()
optimizer.step()
training_stats.IterToc()
training_stats.LogIterStats(step, lr)
if (step+1) % CHECKPOINT_PERIOD == 0:
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
# ---- Training ends ----
# Save last checkpoint
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
except (RuntimeError, KeyboardInterrupt):
del dataiterator
logger.info('Save ckpt on exception ...')
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and not args.no_save:
tblogger.close()
if __name__ == '__main__':
main()
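
# Example invocation (config path and batch size are illustrative, not from
# the original repo):
#   python tools/train_net_step.py --dataset coco2017 \
#       --cfg configs/e2e_mask_rcnn_R-50-FPN_1x.yaml --bs 8 --nw 4 --use_tfboard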
| 38.435518 | 107 | 0.642079 |
0a9772419f2ef3e57950559b990b8ce8968146c1
| 4,402 |
py
|
Python
|
Lib/site-packages/astroid/brain/brain_numpy_core_multiarray.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | 4 |
2021-10-20T12:39:09.000Z
|
2022-02-26T15:02:08.000Z
|
Lib/site-packages/astroid/brain/brain_numpy_core_multiarray.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | 12 |
2020-07-05T14:30:46.000Z
|
2020-08-06T21:06:00.000Z
|
Lib/site-packages/astroid/brain/brain_numpy_core_multiarray.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | 1 |
2021-10-20T13:47:10.000Z
|
2021-10-20T13:47:10.000Z
|
# Copyright (c) 2019-2020 hippo91 <[email protected]>
# Copyright (c) 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""Astroid hooks for numpy.core.multiarray module."""
import functools
from astroid.brain.brain_numpy_utils import infer_numpy_member, looks_like_numpy_member
from astroid.brain.helpers import register_module_extender
from astroid.builder import parse
from astroid.inference_tip import inference_tip
from astroid.manager import AstroidManager
from astroid.nodes.node_classes import Attribute, Name
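

def numpy_core_multiarray_transform():
    # Restored from the upstream astroid source: the dataset record dropped
    # this transform, which the register_module_extender call below needs.
    return parse(
        """
    # different functions defined in multiarray.py
    def inner(a, b):
        return numpy.ndarray([0, 0])

    def vdot(a, b):
        return numpy.ndarray([0, 0])
        """
    )
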
register_module_extender(
AstroidManager(), "numpy.core.multiarray", numpy_core_multiarray_transform
)
METHODS_TO_BE_INFERRED = {
"array": """def array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0):
return numpy.ndarray([0, 0])""",
"dot": """def dot(a, b, out=None):
return numpy.ndarray([0, 0])""",
"empty_like": """def empty_like(a, dtype=None, order='K', subok=True):
return numpy.ndarray((0, 0))""",
"concatenate": """def concatenate(arrays, axis=None, out=None):
return numpy.ndarray((0, 0))""",
"where": """def where(condition, x=None, y=None):
return numpy.ndarray([0, 0])""",
"empty": """def empty(shape, dtype=float, order='C'):
return numpy.ndarray([0, 0])""",
"bincount": """def bincount(x, weights=None, minlength=0):
return numpy.ndarray([0, 0])""",
"busday_count": """def busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"busday_offset": """def busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"can_cast": """def can_cast(from_, to, casting='safe'):
return True""",
"copyto": """def copyto(dst, src, casting='same_kind', where=True):
return None""",
"datetime_as_string": """def datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind'):
return numpy.ndarray([0, 0])""",
"is_busday": """def is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None):
return numpy.ndarray([0, 0])""",
"lexsort": """def lexsort(keys, axis=-1):
return numpy.ndarray([0, 0])""",
"may_share_memory": """def may_share_memory(a, b, max_work=None):
return True""",
# Not yet available because dtype is not yet present in those brains
# "min_scalar_type": """def min_scalar_type(a):
# return numpy.dtype('int16')""",
"packbits": """def packbits(a, axis=None, bitorder='big'):
return numpy.ndarray([0, 0])""",
# Not yet available because dtype is not yet present in those brains
# "result_type": """def result_type(*arrays_and_dtypes):
# return numpy.dtype('int16')""",
"shares_memory": """def shares_memory(a, b, max_work=None):
return True""",
"unpackbits": """def unpackbits(a, axis=None, count=None, bitorder='big'):
return numpy.ndarray([0, 0])""",
"unravel_index": """def unravel_index(indices, shape, order='C'):
return (numpy.ndarray([0, 0]),)""",
"zeros": """def zeros(shape, dtype=float, order='C'):
return numpy.ndarray([0, 0])""",
}
for method_name, function_src in METHODS_TO_BE_INFERRED.items():
inference_function = functools.partial(infer_numpy_member, function_src)
AstroidManager().register_transform(
Attribute,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
AstroidManager().register_transform(
Name,
inference_tip(inference_function),
functools.partial(looks_like_numpy_member, method_name),
)
| 43.584158 | 133 | 0.647887 |
0a9876a51a89bf0aa93f351a61986d7fa1facb0f
| 211 |
py
|
Python
|
tests/asp/weakConstraints/testcase13.bug.weakconstraints.gringo.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19 |
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/asp/weakConstraints/testcase13.bug.weakconstraints.gringo.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80 |
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/asp/weakConstraints/testcase13.bug.weakconstraints.gringo.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6 |
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
input = """
2 18 3 0 3 19 20 21
1 1 1 0 18
2 23 3 0 3 19 24 25
1 1 2 1 21 23
3 5 21 19 20 24 25 0 0
6 0 5 5 21 19 20 24 25 1 1 1 1 1
0
21 a
19 b
20 c
24 d
25 e
28 f
0
B+
0
B-
1
0
1
"""
output = """
COST 1@1
"""
| 8.115385 | 32 | 0.540284 |
0a98cfd9f20dfc0c1b38e64c743a29230c7a8c4f
| 195 |
py
|
Python
|
whoPay.py
|
susurigirl/susuri
|
cec96cc9abd5a25762e15db27c17e70a95ae874c
|
[
"MIT"
] | null | null | null |
whoPay.py
|
susurigirl/susuri
|
cec96cc9abd5a25762e15db27c17e70a95ae874c
|
[
"MIT"
] | null | null | null |
whoPay.py
|
susurigirl/susuri
|
cec96cc9abd5a25762e15db27c17e70a95ae874c
|
[
"MIT"
] | null | null | null |
import random
# Prompt and result messages reconstructed in English; the original
# (non-ASCII) strings were lost in extraction.
names_string = input("Enter the names of everyone, separated by commas(,).\n")
names = names_string.split(",")
print(names)
# randint is inclusive on both ends, so the upper bound must be len(names) - 1
# (random.choice(names) would be the more idiomatic alternative)
n = random.randint(0, len(names) - 1)
print(f"{names[n]} will pay!")
| 19.5 | 64 | 0.676923 |
0a98e4f650bd93b816382fc9c8f7255712fc94e9
| 1,044 |
py
|
Python
|
jp.atcoder/abc056/arc070_b/26725094.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1 |
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc056/arc070_b/26725094.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1 |
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc056/arc070_b/26725094.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
import typing
import numpy as np
main()
| 23.2 | 65 | 0.425287 |
0a99a93e656914b21bfd27861c1447d786a91bee
| 2,929 |
py
|
Python
|
MicroPython_BUILD/components/micropython/esp32/modules_examples/mqtt_example.py
|
FlorianPoot/MicroPython_ESP32_psRAM_LoBo
|
fff2e193d064effe36a7d456050faa78fe6280a8
|
[
"Apache-2.0"
] | 838 |
2017-07-14T10:08:13.000Z
|
2022-03-22T22:09:14.000Z
|
MicroPython_BUILD/components/micropython/esp32/modules_examples/mqtt_example.py
|
FlorianPoot/MicroPython_ESP32_psRAM_LoBo
|
fff2e193d064effe36a7d456050faa78fe6280a8
|
[
"Apache-2.0"
] | 395 |
2017-08-18T15:56:17.000Z
|
2022-03-20T11:28:23.000Z
|
MicroPython_BUILD/components/micropython/esp32/modules_examples/mqtt_example.py
|
FlorianPoot/MicroPython_ESP32_psRAM_LoBo
|
fff2e193d064effe36a7d456050faa78fe6280a8
|
[
"Apache-2.0"
] | 349 |
2017-09-02T18:00:23.000Z
|
2022-03-31T23:26:22.000Z
|
import network
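import utime

# Callback stubs reconstructed from the calls below (the originals were
# elided from this record); they just log each MQTT event.
def conncb(task):
    print("[{}] Connected".format(task))

def disconncb(task):
    print("[{}] Disconnected".format(task))

def subscb(task):
    print("[{}] Subscribed".format(task))

def pubcb(pub):
    print("[{}] Published: {}".format(pub[0], pub[1]))

def datacb(msg):
    print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
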
mqtt = network.mqtt("loboris", "mqtt://loboris.eu", user="wifimcu", password="wifimculobo", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# secure connection requires more memory and may not work
# mqtts = network.mqtt("eclipse", "mqtts://iot.eclipse.org", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# wsmqtt = network.mqtt("eclipse", "ws://iot.eclipse.org:80/ws", cleansession=True, data_cb=datacb)
mqtt.start()
#mqtt.config(lwt_topic='status', lwt_msg='Disconnected')
'''
# Wait until status is: (1, 'Connected')
mqtt.subscribe('test')
mqtt.publish('test', 'Hi from Micropython')
mqtt.stop()
'''
# ==================
# ThingSpeak example
# ==================
import network
thing = network.mqtt("thingspeak", "mqtt://mqtt.thingspeak.com", user="anyName", password="ThingSpeakMQTTid", cleansession=True, data_cb=datacb)
# or secure connection
#thing = network.mqtt("thingspeak", "mqtts://mqtt.thingspeak.com", user="anyName", password="ThingSpeakMQTTid", cleansession=True, data_cb=datacb)
thingspeakChannelId = "123456" # enter Thingspeak Channel ID
thingspeakChannelWriteApiKey = "ThingspeakWriteAPIKey" # EDIT - enter Thingspeak Write API Key
thingspeakFieldNo = 1
thingSpeakChanelFormat = "json"
pubchan = "channels/{:s}/publish/{:s}".format(thingspeakChannelId, thingspeakChannelWriteApiKey)
pubfield = "channels/{:s}/publish/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
subchan = "channels/{:s}/subscribe/{:s}/{:s}".format(thingspeakChannelId, thingSpeakChanelFormat, thingspeakChannelWriteApiKey)
subfield = "channels/{:s}/subscribe/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
thing.start()
tmo = 0
while thing.status()[0] != 2:
utime.sleep_ms(100)
tmo += 1
if tmo > 80:
print("Not connected")
break
# subscribe to channel
thing.subscribe(subchan)
# subscribe to field
thing.subscribe(subfield)
# publish to channel
# Payload can include any of these fields, separated by ';':
# "field1=value;field2=value;...;field8=value;latitude=value;longitude=value;elevation=value;status=value"
thing.publish(pubchan, "field1=25.2;status=On line")
# Publish to field
thing.publish(pubfield, "24.5")
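
# When finished, the connection can be torn down the same way as in the
# first example:
# thing.stop()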
| 33.284091 | 216 | 0.712188 |
0a9a47e3f3a1f529a8e26eeea21042cb90395afd
| 585 |
py
|
Python
|
mlb/game/migrations/0009_game_game_type.py
|
atadams/mlb
|
633b2eb53e5647c64a48c31ca68a50714483fb1d
|
[
"MIT"
] | null | null | null |
mlb/game/migrations/0009_game_game_type.py
|
atadams/mlb
|
633b2eb53e5647c64a48c31ca68a50714483fb1d
|
[
"MIT"
] | null | null | null |
mlb/game/migrations/0009_game_game_type.py
|
atadams/mlb
|
633b2eb53e5647c64a48c31ca68a50714483fb1d
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.8 on 2019-12-14 19:07
from django.db import migrations, models
| 30.789474 | 253 | 0.589744 |
0a9a9f93de2f3ba2e7d9c2affc936358894ee511
| 36,217 |
py
|
Python
|
backend/main/chapters/c06_lists.py
|
Vman45/futurecoder
|
0f4abc0ab00ec473e6cf6f51d534ef2deb26a086
|
[
"MIT"
] | null | null | null |
backend/main/chapters/c06_lists.py
|
Vman45/futurecoder
|
0f4abc0ab00ec473e6cf6f51d534ef2deb26a086
|
[
"MIT"
] | 1 |
2022-02-28T01:35:27.000Z
|
2022-02-28T01:35:27.000Z
|
backend/main/chapters/c06_lists.py
|
suchoudh/futurecoder
|
0f4abc0ab00ec473e6cf6f51d534ef2deb26a086
|
[
"MIT"
] | null | null | null |
# flake8: NOQA E501
import ast
import random
from textwrap import dedent
from typing import List
from main.exercises import generate_list, generate_string
from main.text import ExerciseStep, MessageStep, Page, Step, VerbatimStep, search_ast
from main.utils import returns_stdout
| 34.038534 | 415 | 0.608333 |
0a9abefb5c7f43f4b3586ebf44ef35bd05d5118a
| 1,223 |
py
|
Python
|
redisSeed.py
|
bigmacd/miscPython
|
ec473c724be54241e369a1bdb0f739d2b0ed02ee
|
[
"BSD-3-Clause"
] | null | null | null |
redisSeed.py
|
bigmacd/miscPython
|
ec473c724be54241e369a1bdb0f739d2b0ed02ee
|
[
"BSD-3-Clause"
] | null | null | null |
redisSeed.py
|
bigmacd/miscPython
|
ec473c724be54241e369a1bdb0f739d2b0ed02ee
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import redis
import json
import argparse
""" Follows the StackExchange best practice for creating a work queue.
Basically push a task and publish a message that a task is there."""
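

def PushTask(client, queue, task, topic):
    # Minimal reconstruction of the elided helper, following the docstring
    # above: push the task onto a Redis list, then publish a notification so
    # subscribed workers know work is available. The exact payload format is
    # an assumption.
    client.lpush(queue, json.dumps(task))
    client.publish(topic, queue)
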
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--queue", help="The queue from which workers will grab tasks")
parser.add_argument("-t", "--task", help="The task data")
parser.add_argument("-o", "--topic", help="The topic to which workers are subscribed")
parser.add_argument("-s", "--server", help="redis server host or IP")
parser.add_argument("-p",
"--port",
help="redis server port (default is 6379)",
type=int,
default=6379)
args = parser.parse_args()
    if (args.queue is None
            or args.task is None
            or args.topic is None
            or args.server is None):
        parser.print_help()
    else:
        client = redis.StrictRedis(host=args.server, port=args.port)
        PushTask(client, args.queue, args.task, args.topic)
| 34.942857 | 95 | 0.614881 |
0a9ca97f3e91b994b2c90fedaf1ef527a056c57a
| 891 |
py
|
Python
|
app/celery.py
|
TIHLDE/Lepton
|
60ec0793381f1c1b222f305586e8c2d4345fb566
|
[
"MIT"
] | 7 |
2021-03-04T18:49:12.000Z
|
2021-03-08T18:25:51.000Z
|
app/celery.py
|
TIHLDE/Lepton
|
60ec0793381f1c1b222f305586e8c2d4345fb566
|
[
"MIT"
] | 251 |
2021-03-04T19:19:14.000Z
|
2022-03-31T14:47:53.000Z
|
app/celery.py
|
tihlde/Lepton
|
5cab3522c421b76373a5c25f49267cfaef7b826a
|
[
"MIT"
] | 3 |
2021-10-05T19:03:04.000Z
|
2022-02-25T13:32:09.000Z
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
app = Celery("app")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
app.conf.update(
task_serializer="json",
accept_content=["json"], # Ignore other content
result_serializer="json",
timezone="Europe/Oslo",
enable_utc=True,
)
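

# Usage sketch (not in the original file): tasks bound to this app instance
# are picked up from each Django app's tasks.py, e.g.:
#
# @app.task(bind=True)
# def debug_task(self):
#     print(f"Request: {self.request!r}")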
| 27 | 66 | 0.751964 |
0a9d2e6c0217618be0d23544b03fc29100edce45
| 161 |
py
|
Python
|
src/garage/core/__init__.py
|
researchai/unsupervised_meta_rl
|
9ca4b41438277ef6cfea047482b98de9da07815a
|
[
"MIT"
] | 1 |
2019-07-31T06:53:38.000Z
|
2019-07-31T06:53:38.000Z
|
src/garage/core/__init__.py
|
researchai/unsupervised_meta_rl
|
9ca4b41438277ef6cfea047482b98de9da07815a
|
[
"MIT"
] | null | null | null |
src/garage/core/__init__.py
|
researchai/unsupervised_meta_rl
|
9ca4b41438277ef6cfea047482b98de9da07815a
|
[
"MIT"
] | 1 |
2020-02-05T00:34:07.000Z
|
2020-02-05T00:34:07.000Z
|
from garage.core.serializable import Serializable
from garage.core.parameterized import Parameterized # noqa: I100
__all__ = ['Serializable', 'Parameterized']
| 32.2 | 65 | 0.807453 |
0a9de0f402594abfa8300f717bad17c1f1a23420
| 856 |
py
|
Python
|
formidable/forms/boundfield.py
|
jayvdb/django-formidable
|
df8bcd0c882990d72d302be47aeb4fb11915b1fa
|
[
"MIT"
] | null | null | null |
formidable/forms/boundfield.py
|
jayvdb/django-formidable
|
df8bcd0c882990d72d302be47aeb4fb11915b1fa
|
[
"MIT"
] | null | null | null |
formidable/forms/boundfield.py
|
jayvdb/django-formidable
|
df8bcd0c882990d72d302be47aeb4fb11915b1fa
|
[
"MIT"
] | null | null | null |
from django.forms import forms
| 23.777778 | 76 | 0.671729 |
0a9deb518dd12c6a3961ce613b76fcc3db2acd68
| 602 |
py
|
Python
|
algorithm_training/abc87.py
|
hirotosuzuki/algorithm_training
|
3134bad4ea2ea57a77e05be6f21ba776a558f520
|
[
"MIT"
] | null | null | null |
algorithm_training/abc87.py
|
hirotosuzuki/algorithm_training
|
3134bad4ea2ea57a77e05be6f21ba776a558f520
|
[
"MIT"
] | null | null | null |
algorithm_training/abc87.py
|
hirotosuzuki/algorithm_training
|
3134bad4ea2ea57a77e05be6f21ba776a558f520
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
task = TaskB()
task.run()
| 21.5 | 54 | 0.413621 |
0a9e0852ce066b6a61ac5cfb9625f8879b66f594
| 536 |
py
|
Python
|
serveur/serveurDroit.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
serveur/serveurDroit.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
serveur/serveurDroit.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import socket,sys,os
TCP_IP = '127.0.0.1'
TCP_PORT = 6262
BUFFER_SIZE = 1024
s= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((TCP_IP,TCP_PORT))
s.listen(5)
conn, addr = s.accept()
print('Incoming connection:', addr)
data = conn.recv(BUFFER_SIZE)
if data == "m" :
os.popen("chmod +w $PWD")
else :
os.popen("chmod -w $PWD")
while True:
    data = conn.recv(BUFFER_SIZE)
    print(data)
    if data == "1":
        break
    rep = os.popen(data + " 2>&1")
    conn.send("response:\n" + rep.read())
conn.close()
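
# Matching client sketch (illustrative, not part of the original file):
# connect, send the mode flag ("m" enables write access), then issue shell
# commands until "1" ends the session.
# c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# c.connect(('127.0.0.1', 6262))
# c.send("m")
# c.send("ls"); print(c.recv(1024))
# c.send("1"); c.close()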
| 14.486486 | 51 | 0.641791 |
0a9ea2c54f2546f23ad0eceb20e12ee8b19cd36b
| 888 |
py
|
Python
|
BE/common/helpers.py
|
kosior/ngLearn-1
|
4cc52153876aca409d56bd9cabace9283946bd32
|
[
"MIT"
] | 1 |
2018-05-06T00:31:35.000Z
|
2018-05-06T00:31:35.000Z
|
BE/common/helpers.py
|
kosior/ngLearn-1
|
4cc52153876aca409d56bd9cabace9283946bd32
|
[
"MIT"
] | null | null | null |
BE/common/helpers.py
|
kosior/ngLearn-1
|
4cc52153876aca409d56bd9cabace9283946bd32
|
[
"MIT"
] | null | null | null |
from rest_framework_jwt.utils import jwt_decode_handler
from users.models import User
from users.serializers import UserSerializer
| 24 | 71 | 0.684685 |
0a9ed4d324eb619f1707025aa2d1ca6c25ef2609
| 17,230 |
py
|
Python
|
src/finn/util/basic.py
|
quetric/finn-base-1
|
1494a13a430c784683c2c33288823f83d1cd6fed
|
[
"BSD-3-Clause"
] | null | null | null |
src/finn/util/basic.py
|
quetric/finn-base-1
|
1494a13a430c784683c2c33288823f83d1cd6fed
|
[
"BSD-3-Clause"
] | null | null | null |
src/finn/util/basic.py
|
quetric/finn-base-1
|
1494a13a430c784683c2c33288823f83d1cd6fed
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Xilinx nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import random
import string
import subprocess
import tempfile
import warnings
from finn.core.datatype import DataType
# mapping from PYNQ board names to FPGA part names
pynq_part_map = dict()
pynq_part_map["Ultra96"] = "xczu3eg-sbva484-1-e"
pynq_part_map["Pynq-Z1"] = "xc7z020clg400-1"
pynq_part_map["Pynq-Z2"] = "xc7z020clg400-1"
pynq_part_map["ZCU102"] = "xczu9eg-ffvb1156-2-e"
pynq_part_map["ZCU104"] = "xczu7ev-ffvc1156-2-e"
# native AXI HP port width (in bits) for PYNQ boards
pynq_native_port_width = dict()
pynq_native_port_width["Pynq-Z1"] = 64
pynq_native_port_width["Pynq-Z2"] = 64
pynq_native_port_width["Ultra96"] = 128
pynq_native_port_width["ZCU102"] = 128
pynq_native_port_width["ZCU104"] = 128
# Alveo device and platform mappings
alveo_part_map = dict()
alveo_part_map["U50"] = "xcu50-fsvh2104-2L-e"
alveo_part_map["U200"] = "xcu200-fsgd2104-2-e"
alveo_part_map["U250"] = "xcu250-figd2104-2L-e"
alveo_part_map["U280"] = "xcu280-fsvh2892-2L-e"
alveo_default_platform = dict()
alveo_default_platform["U50"] = "xilinx_u50_gen3x16_xdma_201920_3"
alveo_default_platform["U200"] = "xilinx_u200_xdma_201830_2"
alveo_default_platform["U250"] = "xilinx_u250_xdma_201830_2"
alveo_default_platform["U280"] = "xilinx_u280_xdma_201920_3"
def get_rtlsim_trace_depth():
"""Return the trace depth for rtlsim via PyVerilator. Controllable
via the RTLSIM_TRACE_DEPTH environment variable. If the env.var. is
undefined, the default value of 1 is returned. A trace depth of 1
will only show top-level signals and yield smaller .vcd files.
The following depth values are of interest for whole-network stitched IP
rtlsim:
- level 1 shows top-level input/output streams
- level 2 shows per-layer input/output streams
- level 3 shows per full-layer I/O including FIFO count signals
"""
try:
return int(os.environ["RTLSIM_TRACE_DEPTH"])
except KeyError:
return 1
def get_remote_vivado():
"""Return the address of the remote Vivado synthesis server as set by the,
REMOTE_VIVADO environment variable, otherwise return None"""
try:
return os.environ["REMOTE_VIVADO"]
except KeyError:
return None
def get_num_default_workers():
"""Return the number of workers for parallel transformations. Controllable
via the NUM_DEFAULT_WORKERS environment variable. If the env.var. is
undefined, the default value of 1 is returned.
"""
try:
return int(os.environ["NUM_DEFAULT_WORKERS"])
except KeyError:
return 1
def get_finn_root():
"Return the root directory that FINN is cloned into."
try:
return os.environ["FINN_ROOT"]
except KeyError:
raise Exception(
"""Environment variable FINN_ROOT must be set
correctly. Please ensure you have launched the Docker container correctly.
"""
)
def get_execution_error_thresh():
"Return the max error that is allowed for rounding in FINN execution."
try:
return float(os.environ["ERROR_THRESH"])
except KeyError:
return 1e-2
def get_sanitize_quant_tensors():
"""Return whether tensors with quantization annotations should be sanitized.
Enabled by default, disabling will yield faster ONNX execution but may give
incorrect results. Use with caution."""
try:
return int(os.environ["SANITIZE_QUANT_TENSORS"])
except KeyError:
# enabled by default
return 1
def make_build_dir(prefix=""):
"""Creates a temporary folder with given prefix to be used as a build dir.
Use this function instead of tempfile.mkdtemp to ensure any generated files
will survive on the host after the FINN Docker container exits."""
try:
inst_prefix = os.environ["FINN_INST_NAME"] + "/"
return tempfile.mkdtemp(prefix=inst_prefix + prefix)
except KeyError:
raise Exception(
"""Environment variable FINN_INST_NAME must be set
correctly. Please ensure you have launched the Docker container correctly.
"""
)
def get_by_name(container, name, name_field="name"):
"""Return item from container by .name field if it exists, None otherwise.
Will throw an Exception if multiple items are found, since this violates the
ONNX standard."""
names = [getattr(x, name_field) for x in container]
inds = [i for i, e in enumerate(names) if e == name]
if len(inds) > 1:
raise Exception("Found multiple get_by_name matches, undefined behavior")
elif len(inds) == 0:
return None
else:
ind = inds[0]
return container[ind]
def remove_by_name(container, name, name_field="name"):
"""Remove item from container by .name field if it exists."""
item = get_by_name(container, name, name_field)
if item is not None:
container.remove(item)
def random_string(stringLength=6):
"""Randomly generate a string of letters and digits."""
lettersAndDigits = string.ascii_letters + string.digits
return "".join(random.choice(lettersAndDigits) for i in range(stringLength))
def interleave_matrix_outer_dim_from_partitions(matrix, n_partitions):
"""Interleave the outermost dimension of a matrix from given
partitions (n_partitions)."""
if type(matrix) != np.ndarray or matrix.dtype != np.float32:
# try to convert to a float numpy array (container dtype is float)
matrix = np.asarray(matrix, dtype=np.float32)
shp = matrix.shape
ndim = matrix.ndim
# ensure # partitions evenly divide the outermost dimension
assert (
shp[0] % n_partitions == 0
), """The outermost dimension is not divisable
by the number of partitions."""
# only tested for matrices
assert (
ndim == 2
), """The dimension of the matrix is not 2. Currently this function
only works for matrices."""
# interleave rows between PEs using reshape + transpose
matrix_r = matrix.reshape(-1, n_partitions, shp[1]).transpose((1, 0, 2))
matrix_r = matrix_r.reshape(n_partitions, -1, shp[1])
return matrix_r
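# Hedged worked example (added for illustration, not part of the original file):
#   W = np.arange(8).reshape(4, 2)                      # rows r0..r3
#   interleave_matrix_outer_dim_from_partitions(W, 2)
#   -> shape (2, 2, 2): partition 0 holds rows [r0, r2],
#      partition 1 holds rows [r1, r3]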
def roundup_to_integer_multiple(x, factor):
"""Round up integer x to the nearest integer multiple of integer factor.
Returns x if factor is set to -1. Both x and factor must otherwise be
positive."""
# ensure integers
assert int(x) == x, "The input x is not an integer."
assert int(factor) == factor, "The input factor is not an integer."
# use -1 to indicate no padding needed
if factor == -1:
return x
# ensure positive values
assert factor > 0 and x > 0, "Factor and x are <= 0."
if x < factor:
return factor
else:
if x % factor == 0:
return x
else:
return x + (factor - (x % factor))
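# Hedged examples (added for illustration):
#   roundup_to_integer_multiple(10, 8)  -> 16
#   roundup_to_integer_multiple(16, 8)  -> 16
#   roundup_to_integer_multiple(3, 8)   -> 8    (x < factor returns factor)
#   roundup_to_integer_multiple(10, -1) -> 10   (factor == -1 means no padding)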
def pad_tensor_to_multiple_of(ndarray, pad_to_dims, val=0, distr_pad=False):
"""Pad each dimension of given NumPy ndarray using val, so that each
dimension is a multiple of the respective value in pad_to_dims. -1 means
do not pad that particular dimension. If distr_pad is False, all padding
will be inserted after the existing values; otherwise it will be split
evenly between before and after the existing values, with one extra value
inserted after if the padding amount is not divisible by two."""
if type(ndarray) != np.ndarray or ndarray.dtype != np.float32:
# try to convert to a float numpy array (container dtype is float)
ndarray = np.asarray(ndarray, dtype=np.float32)
assert ndarray.ndim == len(
pad_to_dims
), """The dimensions of the input
array don't match the length of the pad_to_dims value."""
# compute the desired shape
desired = zip(list(ndarray.shape), list(pad_to_dims))
desired = map(lambda x: roundup_to_integer_multiple(x[0], x[1]), desired)
desired = np.asarray(list(desired), dtype=np.int32)
current = np.asarray(ndarray.shape, dtype=np.int32)
pad_amt = desired - current
# add padding to get to the desired shape
if distr_pad:
pad_before = (pad_amt // 2).astype(np.int32)
pad_after = pad_amt - pad_before
pad_amt = list(zip(pad_before, pad_after))
else:
# all padding is added after the existing values
pad_amt = list(map(lambda x: (0, x), pad_amt))
ret = np.pad(ndarray, pad_amt, mode="constant", constant_values=val)
assert (
np.asarray(ret.shape, dtype=np.int32) == desired
).all(), """The
calculated output array doesn't match the desired/expected one."""
return ret
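# Hedged examples (added for illustration):
#   A = np.ones((3, 3), dtype=np.float32)
#   pad_tensor_to_multiple_of(A, [4, -1]).shape
#   -> (4, 3): one zero row appended after the data (distr_pad=False)
#   pad_tensor_to_multiple_of(A, [5, 5], distr_pad=True).shape
#   -> (5, 5): the 2-element padding on each axis is split 1 before / 1 after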
def calculate_matvec_accumulator_range(matrix, vec_dt):
"""Calculate the minimum and maximum possible result (accumulator) values
for a dot product x * A, given matrix A of dims (MW, MH), and vector (1, MW)
with datatype vec_dt. Returns (acc_min, acc_max).
"""
min_weight = matrix.min()
max_weight = matrix.max()
perceptive_field_elems = matrix.shape[0]
min_input = vec_dt.min()
max_input = vec_dt.max()
# calculate minimum and maximum values of accumulator
# assume inputs span the whole range of the input datatype
acc_min = perceptive_field_elems * min(
min_weight * max_input,
min_weight * min_input,
max_weight * max_input,
max_weight * min_input,
)
acc_max = perceptive_field_elems * max(
min_weight * max_input,
min_weight * min_input,
max_weight * max_input,
max_weight * min_input,
)
return (acc_min, acc_max)
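# Hedged worked example (added; assumes DataType.INT2 spans [-2, 1]):
#   W = np.asarray([[1.0, -1.0], [2.0, -2.0]])          # (MW, MH) = (2, 2)
#   calculate_matvec_accumulator_range(W, DataType.INT2)
#   -> (-8.0, 8.0): per-element extremes are -4 and 4, times MW = 2 rows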
def gen_finn_dt_tensor(finn_dt, tensor_shape):
"""Generates random tensor in given shape and with given FINN DataType."""
if type(tensor_shape) == list:
tensor_shape = tuple(tensor_shape)
if finn_dt == DataType.BIPOLAR:
tensor_values = np.random.randint(2, size=tensor_shape)
tensor_values = 2 * tensor_values - 1
elif finn_dt == DataType.BINARY:
tensor_values = np.random.randint(2, size=tensor_shape)
elif "INT" in finn_dt.name or finn_dt == DataType.TERNARY:
tensor_values = np.random.randint(
finn_dt.min(), high=finn_dt.max() + 1, size=tensor_shape
)
else:
raise ValueError(
"Datatype {} is not supported, no tensor could be generated".format(finn_dt)
)
# always use float type as container
return tensor_values.astype(np.float32)
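# Hedged examples (added for illustration; assumes DataType.INT4 is defined):
#   gen_finn_dt_tensor(DataType.BIPOLAR, (2, 2))  # 2x2 array of values in {-1, +1}
#   gen_finn_dt_tensor(DataType.INT4, [1, 8])     # values in [-8, 7], float32 container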
def calculate_signed_dot_prod_range(dt_a, dt_b, len):
"""Returns the (min,max) values a dot product between two signed vectors of
types dt_a and dt_b of len elements can take."""
assert (
dt_a.signed() and dt_b.signed()
), """The input values are not both
signed vectors."""
min_prod = 2 ** 30
max_prod = -(2 ** 30)
for a_val in [dt_a.min(), dt_a.max()]:
for b_val in [dt_b.min(), dt_b.max()]:
prod = a_val * b_val * len
if prod < min_prod:
min_prod = prod
if prod > max_prod:
max_prod = prod
return (min_prod, max_prod)
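# Hedged worked example (added; assumes DataType.INT2 spans [-2, 1]):
#   calculate_signed_dot_prod_range(DataType.INT2, DataType.INT2, 4)
#   -> (-8, 16): extremes are (-2)*1*4 = -8 and (-2)*(-2)*4 = 16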
def sanitize_quant_values(model, node_tensors, execution_context, check_values=False):
"""Sanitize given list of tensors in execution_context by rounding values
that are supposed to be integers (as indicated by their quantization
annotation). Will raise an assertion if the amount of rounding is too large.
Returns the sanitized execution context.
If check_values is specified, an extra DataType.allowed() check will be
performed on any rounded tensors.
Background:
FINN uses floating point tensors as a carrier data type to represent
integers. Floating point arithmetic can introduce rounding errors, e.g.
(int_num * float_scale) / float_scale is not always equal to int_num.
We use this function to ensure that the values that are supposed to be
integers are indeed integers.
"""
for tensor in node_tensors:
dtype = model.get_tensor_datatype(tensor)
        # floats don't need sanitization, skip to next tensor
        # (skipping them avoids unnecessary work at runtime)
if dtype == DataType.FLOAT32:
continue
current_values = execution_context[tensor]
updated_values = current_values
has_to_be_rounded = False
# TODO: vectorize with numpy
for value in np.nditer(current_values):
if not dtype.allowed(value):
has_to_be_rounded = True
break
if has_to_be_rounded:
updated_values = np.round(current_values)
warnings.warn(
"The values of tensor {} can't be represented "
"with the set FINN datatype ({}), they will be rounded to match the "
"FINN datatype.".format(tensor, dtype)
)
# check if rounded values are not too far from original values
max_error = max(np.abs(current_values - updated_values).flatten())
if max_error <= get_execution_error_thresh():
if check_values is True:
# check again if values can now be represented with set finn datatype
# TODO: vectorize with numpy
for value in np.nditer(updated_values):
if not dtype.allowed(value):
raise Exception(
"""Values can't be represented with set
finn datatype ({}) for input {}""".format(
dtype, tensor
)
)
execution_context[tensor] = updated_values
else:
raise Exception(
"""Rounding error is too high to match set FINN
datatype ({}) for input {}""".format(
dtype, tensor
)
)
return execution_context
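# Hedged usage sketch (added; `model`, `node` and `ctx` are illustrative names):
#   ctx = sanitize_quant_values(model, node.input, ctx)    # before executing a node
#   ctx = sanitize_quant_values(model, node.output, ctx)   # after executing a node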
| 38.9819 | 88 | 0.673593 |
0a9f22dd58e0b2b2c094a6f1cf7277e84b5b669b
| 9,455 |
py
|
Python
|
mainmenu.py
|
jeffrypaul37/Hospital-Management-System
|
4ff08bed5387ca23e3f31dbbf46e625d8ae5807b
|
[
"Apache-2.0"
] | null | null | null |
mainmenu.py
|
jeffrypaul37/Hospital-Management-System
|
4ff08bed5387ca23e3f31dbbf46e625d8ae5807b
|
[
"Apache-2.0"
] | null | null | null |
mainmenu.py
|
jeffrypaul37/Hospital-Management-System
|
4ff08bed5387ca23e3f31dbbf46e625d8ae5807b
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
from tkcalendar import Calendar
from datetime import datetime
from datetime import date
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import askyesno
import re
import sqlite3
import tkinter.messagebox
import pandas as pd
import datetime
from dateutil import rrule, parser
today = date.today()
date1 = '05-10-2021'
date2 = '12-31-2050'
datesx = pd.date_range(today, date2).tolist()
conn = sqlite3.connect('database copy.db')
c = conn.cursor()
ids = []
root = Tk()
root.title("Shalom Clinic")
#root.geometry("1200x720+0+0")
root.attributes('-fullscreen', True)
root.resizable(0, 0)
Top = Frame(root, bd=1, relief=RIDGE)
Top.pack(side=TOP, fill=X)
Form = Frame(root, height=1)
Form.pack(side=TOP, pady=1)
lbl_title = Label(Top, text = "Shalom Clinic", font=('arial', 15))
lbl_title.pack(fill=X)
options=["Male","Female"]
options1=datesx
options2=["10:00:00","11:00:00","13:00:00"]
options3=["O+","O-","A+","A-","B+","B-","AB+","AB-"]
b = Application(root)
root.resizable(False, False)
root.mainloop()
| 31.20462 | 230 | 0.564886 |
0a9f437ec901227c3a525ef2b2000464e450f945
| 3,399 |
py
|
Python
|
chia_tea/discord/commands/test_wallets.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 6 |
2021-08-05T21:31:15.000Z
|
2021-11-15T20:54:25.000Z
|
chia_tea/discord/commands/test_wallets.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 49 |
2021-08-05T19:33:08.000Z
|
2022-03-30T19:33:38.000Z
|
chia_tea/discord/commands/test_wallets.py
|
Tea-n-Tech/chia-tea
|
a5bd327b9d5e048e55e9f5d8cefca2dbcd5eae96
|
[
"BSD-3-Clause"
] | 1 |
2022-01-09T17:08:32.000Z
|
2022-01-09T17:08:32.000Z
|
import os
import tempfile
import unittest
from datetime import datetime
from google.protobuf.json_format import ParseDict
from ...monitoring.MonitoringDatabase import MonitoringDatabase
from ...protobuf.generated.computer_info_pb2 import ADD, UpdateEvent
from ...protobuf.generated.monitoring_service_pb2 import DataUpdateRequest
from ...utils.testing import async_test
from .wallets import wallets_cmd
| 35.40625 | 74 | 0.527214 |
0a9f5b949039e60cbeefb54542ccaa4f60417abd
| 990 |
py
|
Python
|
render/PC_Normalisation.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 158 |
2020-08-19T18:13:28.000Z
|
2022-03-30T13:55:32.000Z
|
render/PC_Normalisation.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 28 |
2020-05-30T04:02:33.000Z
|
2022-03-30T15:46:38.000Z
|
render/PC_Normalisation.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 18 |
2020-08-19T19:52:38.000Z
|
2022-02-06T11:42:26.000Z
|
# Copyright (c) 2020. Hanchen Wang, [email protected]
import os, open3d, numpy as np
File_ = open('ModelNet_flist_short.txt', 'w')
if __name__ == "__main__":
root_dir = "../data/ModelNet_subset/"
for root, dirs, files in os.walk(root_dir, topdown=False):
for file in files:
if '.ply' in file:
amesh = open3d.io.read_triangle_mesh(os.path.join(root, file))
out_file_name = os.path.join(root, file).replace('.ply', '_normalised.obj')
center = amesh.get_center()
amesh.translate(-center)
maxR = (np.asarray(amesh.vertices)**2).sum(axis=1).max()**(1/2)
# we found divided by (2*maxR) has best rendered visualisation results
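                # after translating by -center, max ||v|| equals maxR, so this
                # scaling keeps every vertex within a sphere of radius 0.5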
amesh.scale(1/(2*maxR))
open3d.io.write_triangle_mesh(out_file_name, amesh)
File_.writelines(out_file_name.replace('.obj', '').replace(root_dir, '') + '\n')
print(out_file_name)
| 41.25 | 96 | 0.586869 |
0aa08817091e8a101312819073f37fbcd1819291
| 20,634 |
py
|
Python
|
pymatgen/apps/battery/insertion_battery.py
|
adozier/pymatgen
|
f1cc4d8db24ec11063be2fd84b4ea911f006eeb7
|
[
"MIT"
] | 18 |
2019-06-15T18:08:21.000Z
|
2022-01-30T05:01:29.000Z
|
ComRISB/pyextern/pymatgen/pymatgen/apps/battery/insertion_battery.py
|
comscope/Comsuite
|
b80ca9f34c519757d337487c489fb655f7598cc2
|
[
"BSD-3-Clause"
] | null | null | null |
ComRISB/pyextern/pymatgen/pymatgen/apps/battery/insertion_battery.py
|
comscope/Comsuite
|
b80ca9f34c519757d337487c489fb655f7598cc2
|
[
"BSD-3-Clause"
] | 11 |
2019-06-05T02:57:55.000Z
|
2021-12-29T02:54:25.000Z
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module is used for analysis of materials with potential application as
intercalation batteries.
"""
__author__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Anubhav Jain"
__email__ = "[email protected]"
__date__ = "Jan 13, 2012"
__status__ = "Beta"
import itertools
from pymatgen.core.composition import Composition
from pymatgen.core.units import Charge, Time
from pymatgen.phasediagram.maker import PhaseDiagram
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.apps.battery.battery_abc import AbstractElectrode, \
AbstractVoltagePair
from pymatgen.core.periodic_table import Element
from scipy.constants import N_A
def get_unstable_entries(self, charge_to_discharge=True):
"""
Returns the unstable entries for the electrode.
Args:
charge_to_discharge: Order from most charge to most discharged
state? Defaults to True.
Returns:
A list of unstable entries in the electrode, ordered by amount of
the working ion.
"""
list_copy = list(self._unstable_entries)
return list_copy if charge_to_discharge else list_copy.reverse()
def get_all_entries(self, charge_to_discharge=True):
"""
Return all entries input for the electrode.
Args:
charge_to_discharge:
order from most charge to most discharged state? Defaults to
True.
Returns:
A list of all entries in the electrode (both stable and unstable),
ordered by amount of the working ion.
"""
all_entries = list(self.get_stable_entries())
all_entries.extend(self.get_unstable_entries())
#sort all entries by amount of working ion ASC
fsrt = lambda e: e.composition.get_atomic_fraction(self.working_ion)
all_entries = sorted([e for e in all_entries],
key=fsrt)
return all_entries if charge_to_discharge else all_entries.reverse()
def get_max_instability(self, min_voltage=None, max_voltage=None):
"""
The maximum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return max(data) if len(data) > 0 else None
def get_min_instability(self, min_voltage=None, max_voltage=None):
"""
The minimum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Minimum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return min(data) if len(data) > 0 else None
def get_max_muO2(self, min_voltage=None, max_voltage=None):
"""
Maximum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum critical oxygen chemical of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.muO2_discharge is not None:
                data.append(pair.muO2_discharge)
if pair.muO2_charge is not None:
data.append(pair.muO2_charge)
return max(data) if len(data) > 0 else None
def get_min_muO2(self, min_voltage=None, max_voltage=None):
"""
Minimum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage for a given step
max_voltage: The maximum allowable voltage allowable for a given
step
Returns:
Minimum critical oxygen chemical of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
            if pair.muO2_discharge is not None:
                data.append(pair.muO2_discharge)
if pair.muO2_charge is not None:
data.append(pair.muO2_charge)
return min(data) if len(data) > 0 else None
def get_sub_electrodes(self, adjacent_only=True, include_myself=True):
"""
If this electrode contains multiple voltage steps, then it is possible
to use only a subset of the voltage steps to define other electrodes.
For example, an LiTiO2 electrode might contain three subelectrodes:
[LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2]
This method can be used to return all the subelectrodes with some
options
Args:
adjacent_only: Only return electrodes from compounds that are
adjacent on the convex hull, i.e. no electrodes returned
will have multiple voltage steps if this is set True.
include_myself: Include this identical electrode in the list of
results.
Returns:
A list of InsertionElectrode objects
"""
battery_list = []
pair_it = self._vpairs if adjacent_only \
else itertools.combinations_with_replacement(self._vpairs, 2)
ion = self._working_ion
for pair in pair_it:
entry_charge = pair.entry_charge if adjacent_only \
else pair[0].entry_charge
entry_discharge = pair.entry_discharge if adjacent_only \
else pair[1].entry_discharge
chg_frac = entry_charge.composition.get_atomic_fraction(ion)
dischg_frac = entry_discharge.composition.get_atomic_fraction(ion)
            # keep only entries whose working-ion fraction lies between the
            # charged and discharged endpoints of this voltage pair
            def in_range(entry):
                frac = entry.composition.get_atomic_fraction(ion)
                return chg_frac <= frac <= dischg_frac

            if include_myself or entry_charge != self.fully_charged_entry \
                    or entry_discharge != self.fully_discharged_entry:
unstable_entries = filter(in_range,
self.get_unstable_entries())
stable_entries = filter(in_range, self.get_stable_entries())
all_entries = list(stable_entries)
all_entries.extend(unstable_entries)
battery_list.append(self.__class__(all_entries,
self.working_ion_entry))
return battery_list
def as_dict_summary(self, print_subelectrodes=True):
"""
Generate a summary dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
A summary of this electrode"s properties in dict format.
"""
chg_comp = self.fully_charged_entry.composition
dischg_comp = self.fully_discharged_entry.composition
ion = self.working_ion
d = {"average_voltage": self.get_average_voltage(),
"max_voltage": self.max_voltage,
"min_voltage": self.min_voltage,
"max_delta_volume": self.max_delta_volume,
"max_voltage_step": self.max_voltage_step,
"capacity_grav": self.get_capacity_grav(),
"capacity_vol": self.get_capacity_vol(),
"energy_grav": self.get_specific_energy(),
"energy_vol": self.get_energy_density(),
"working_ion": self._working_ion.symbol,
"nsteps": self.num_steps,
"framework": self._vpairs[0].framework.to_data_dict,
"formula_charge": chg_comp.reduced_formula,
"formula_discharge": dischg_comp.reduced_formula,
"fracA_charge": chg_comp.get_atomic_fraction(ion),
"fracA_discharge": dischg_comp.get_atomic_fraction(ion),
"max_instability": self.get_max_instability(),
"min_instability": self.get_min_instability()}
if print_subelectrodes:
            f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False)
            # materialise the maps so the summary is serialisable under Python 3
            d["adj_pairs"] = list(map(f_dict,
                                      self.get_sub_electrodes(adjacent_only=True)))
            d["all_pairs"] = list(map(f_dict,
                                      self.get_sub_electrodes(adjacent_only=False)))
return d
def __str__(self):
return self.__repr__()
def __repr__(self):
output = []
chg_form = self.fully_charged_entry.composition.reduced_formula
dischg_form = self.fully_discharged_entry.composition.reduced_formula
output.append("InsertionElectrode with endpoints at {} and {}".format(
chg_form, dischg_form))
output.append("Avg. volt. = {} V".format(self.get_average_voltage()))
output.append("Grav. cap. = {} mAh/g".format(self.get_capacity_grav()))
output.append("Vol. cap. = {}".format(self.get_capacity_vol()))
return "\n".join(output)
class InsertionVoltagePair(AbstractVoltagePair):
"""
Defines an Insertion Voltage Pair.
Args:
entry1: Entry corresponding to one of the entries in the voltage step.
entry2: Entry corresponding to the other entry in the voltage step.
working_ion_entry: A single ComputedEntry or PDEntry representing
the element that carries charge across the battery, e.g. Li.
"""
def __repr__(self):
output = ["Insertion voltage pair with working ion {}"
.format(self._working_ion_entry.composition.reduced_formula),
"V = {}, mAh = {}".format(self.voltage, self.mAh),
"mass_charge = {}, mass_discharge = {}"
.format(self.mass_charge, self.mass_discharge),
"vol_charge = {}, vol_discharge = {}"
.format(self.vol_charge, self.vol_discharge),
"frac_charge = {}, frac_discharge = {}"
.format(self.frac_charge, self.frac_discharge)]
return "\n".join(output)
def __str__(self):
return self.__repr__()
| 38.932075 | 97 | 0.630804 |
0aa0ecacfe2573f92054f8caab4ad37415452b90
| 7,202 |
py
|
Python
|
python/GafferUI/ColorSwatchPlugValueWidget.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 561 |
2016-10-18T04:30:48.000Z
|
2022-03-30T06:52:04.000Z
|
python/GafferUI/ColorSwatchPlugValueWidget.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 1,828 |
2016-10-14T19:01:46.000Z
|
2022-03-30T16:07:19.000Z
|
python/GafferUI/ColorSwatchPlugValueWidget.py
|
ddesmond/gaffer
|
4f25df88103b7893df75865ea919fb035f92bac0
|
[
"BSD-3-Clause"
] | 120 |
2016-10-18T15:19:13.000Z
|
2021-12-20T16:28:23.000Z
|
##########################################################################
#
# Copyright (c) 2013, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import imath
import Gaffer
import GafferUI
## \todo Perhaps we could make this a part of the public API? Perhaps we could also make a
# PlugValueDialogue base class to share some of the work with the dialogue made by the
# SplinePlugValueWidget. Or perhaps the `acquire()` here and `NodeSetEditor.acquire()` should
# actually be functionality of CompoundEditor?
| 34.625 | 142 | 0.71494 |
0aa3e139fa08c65698af3c065bdbf7e9c6759f7b
| 1,946 |
py
|
Python
|
NewsPaperD7(final)/NewsPaper/News/migrations/0001_initial.py
|
GregTMJ/django-files
|
dfd2c8da596522b77fb3dfc8089f0d287a94d53b
|
[
"MIT"
] | 1 |
2021-05-29T21:17:56.000Z
|
2021-05-29T21:17:56.000Z
|
NewsPaperD6/NewsPaper/News/migrations/0001_initial.py
|
GregTMJ/django-files
|
dfd2c8da596522b77fb3dfc8089f0d287a94d53b
|
[
"MIT"
] | null | null | null |
NewsPaperD6/NewsPaper/News/migrations/0001_initial.py
|
GregTMJ/django-files
|
dfd2c8da596522b77fb3dfc8089f0d287a94d53b
|
[
"MIT"
] | 1 |
2021-06-30T12:43:39.000Z
|
2021-06-30T12:43:39.000Z
|
# Generated by Django 3.2 on 2021-04-15 18:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 42.304348 | 153 | 0.611511 |
0aa3ebc9e71e06ddfc092d3a9a924b404661453e
| 2,019 |
py
|
Python
|
osh/cmd_exec_test.py
|
rhencke/oil
|
c40004544e47ee78cde1fcb22c672162b8eb2cd2
|
[
"Apache-2.0"
] | 1 |
2019-01-25T01:15:51.000Z
|
2019-01-25T01:15:51.000Z
|
osh/cmd_exec_test.py
|
rhencke/oil
|
c40004544e47ee78cde1fcb22c672162b8eb2cd2
|
[
"Apache-2.0"
] | null | null | null |
osh/cmd_exec_test.py
|
rhencke/oil
|
c40004544e47ee78cde1fcb22c672162b8eb2cd2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
cmd_exec_test.py: Tests for cmd_exec.py
"""
import unittest
from core import test_lib
from core.meta import syntax_asdl, Id
from osh import state
suffix_op = syntax_asdl.suffix_op
osh_word = syntax_asdl.word
word_part = syntax_asdl.word_part
if __name__ == '__main__':
unittest.main()
| 26.92 | 80 | 0.722635 |
0aa43893204c6ba098361aa19c39257195d9d726
| 425 |
py
|
Python
|
blitz_api/migrations/0020_auto_20190529_1200.py
|
MelanieFJNR/Blitz-API
|
9a6daecd158fe07a6aeb80cbf586781eb688f0f9
|
[
"MIT"
] | 3 |
2019-10-22T00:16:49.000Z
|
2021-07-15T07:44:43.000Z
|
blitz_api/migrations/0020_auto_20190529_1200.py
|
MelanieFJNR/Blitz-API
|
9a6daecd158fe07a6aeb80cbf586781eb688f0f9
|
[
"MIT"
] | 1,183 |
2018-04-19T18:40:30.000Z
|
2022-03-31T21:05:05.000Z
|
blitz_api/migrations/0020_auto_20190529_1200.py
|
MelanieFJNR/Blitz-API
|
9a6daecd158fe07a6aeb80cbf586781eb688f0f9
|
[
"MIT"
] | 12 |
2018-04-17T19:16:42.000Z
|
2022-01-27T00:19:59.000Z
|
# Generated by Django 2.0.8 on 2019-05-29 16:00
from django.db import migrations, models
| 22.368421 | 83 | 0.607059 |
0aa458014e027a9ad777515ef9c0b45d42da4384
| 93 |
py
|
Python
|
archiveis/__init__.py
|
palewire/archiveis
|
11b2f1a4be4e7fbdcd52d874733cf20bc2d4f480
|
[
"MIT"
] | 6 |
2021-11-09T11:00:56.000Z
|
2022-01-14T03:44:52.000Z
|
archiveis/__init__.py
|
palewire/archiveis
|
11b2f1a4be4e7fbdcd52d874733cf20bc2d4f480
|
[
"MIT"
] | 4 |
2022-03-28T23:39:23.000Z
|
2022-03-28T23:39:24.000Z
|
archiveis/__init__.py
|
palewire/archiveis
|
11b2f1a4be4e7fbdcd52d874733cf20bc2d4f480
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from .api import capture
__version__ = "0.0.7"
__all__ = ("capture",)
| 15.5 | 24 | 0.677419 |
0aa50be39d8821cc01c657b693bc988aa6fe4578
| 5,265 |
py
|
Python
|
temp/discrete_a2c_agent.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
temp/discrete_a2c_agent.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
temp/discrete_a2c_agent.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | 1 |
2021-11-23T12:30:37.000Z
|
2021-11-23T12:30:37.000Z
|
import numpy as np
import torch
import torch.nn.functional as F
from codes.d_agents.a0_base_agent import float32_preprocessor
from codes.d_agents.on_policy.on_policy_agent import OnPolicyAgent
from codes.e_utils import rl_utils, replay_buffer
from codes.d_agents.actions import ProbabilityActionSelector
from codes.e_utils.names import DeepLearningModelName, AgentMode
| 41.785714 | 128 | 0.688319 |
0aa514fa3ff45ce4defbd248dad8a995955378b1
| 188 |
py
|
Python
|
edmundbotadder/cogs/webhook.py
|
thebeanogamer/edmund-botadder
|
91e71ce572f3206b99e1f7a68d40bc37b947daf5
|
[
"MIT"
] | null | null | null |
edmundbotadder/cogs/webhook.py
|
thebeanogamer/edmund-botadder
|
91e71ce572f3206b99e1f7a68d40bc37b947daf5
|
[
"MIT"
] | null | null | null |
edmundbotadder/cogs/webhook.py
|
thebeanogamer/edmund-botadder
|
91e71ce572f3206b99e1f7a68d40bc37b947daf5
|
[
"MIT"
] | null | null | null |
from discord.ext.commands import Bot, Cog
| 15.666667 | 41 | 0.696809 |
0aa6e5ef18ddd1cd84d84ba40a68b3ca12d3ecf7
| 789 |
py
|
Python
|
apps/core/forms.py
|
allexvissoci/djangoecommerce
|
645c05daa5f13c1e42184a7c6f534b9c260d280a
|
[
"CC0-1.0"
] | null | null | null |
apps/core/forms.py
|
allexvissoci/djangoecommerce
|
645c05daa5f13c1e42184a7c6f534b9c260d280a
|
[
"CC0-1.0"
] | null | null | null |
apps/core/forms.py
|
allexvissoci/djangoecommerce
|
645c05daa5f13c1e42184a7c6f534b9c260d280a
|
[
"CC0-1.0"
] | null | null | null |
from django import forms
from django.core.mail import send_mail
from django.conf import settings
| 32.875 | 75 | 0.595691 |
0aa6ee42edcf06446ba2b86d62dbe6a27542ef2e
| 5,475 |
py
|
Python
|
Fchat/Gui/AddFriendWidget.py
|
jamesaxl/FreeSnake
|
3cef45165bce50d0f296e0d016b49d45aa31a653
|
[
"BSD-2-Clause-FreeBSD"
] | 2 |
2018-11-15T22:55:01.000Z
|
2020-01-01T21:21:07.000Z
|
Fchat/Gui/AddFriendWidget.py
|
jamesaxl/FreeSnake
|
3cef45165bce50d0f296e0d016b49d45aa31a653
|
[
"BSD-2-Clause-FreeBSD"
] | 2 |
2019-11-10T20:31:29.000Z
|
2021-07-31T18:24:47.000Z
|
Fchat/Gui/AddFriendWidget.py
|
jamesaxl/FreeSnake
|
3cef45165bce50d0f296e0d016b49d45aa31a653
|
[
"BSD-2-Clause-FreeBSD"
] | 1 |
2018-11-15T22:55:17.000Z
|
2018-11-15T22:55:17.000Z
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, Gtk, Gdk
| 35.784314 | 107 | 0.667032 |
0aa7a0ee45227c9db1cfad5be465b9e3f1596fbf
| 533 |
py
|
Python
|
python01/game.py
|
liyan2013/hogwarts
|
4b81d968b049a13cb2aa293d32c034ca3a30ee79
|
[
"Apache-2.0"
] | null | null | null |
python01/game.py
|
liyan2013/hogwarts
|
4b81d968b049a13cb2aa293d32c034ca3a30ee79
|
[
"Apache-2.0"
] | null | null | null |
python01/game.py
|
liyan2013/hogwarts
|
4b81d968b049a13cb2aa293d32c034ca3a30ee79
|
[
"Apache-2.0"
] | null | null | null |
import random
if __name__ == '__main__':
game()
| 17.766667 | 51 | 0.463415 |
0aa7b0d58d58a3a7d6eb18bd016c9b5d7166087a
| 681 |
py
|
Python
|
petstore/api/api_response.py
|
andrii-grytsenko/io.swagger.petstore3.testing
|
81a0a16d574d0c0664b297e7ba7ff2bb5a9a0c40
|
[
"MIT"
] | null | null | null |
petstore/api/api_response.py
|
andrii-grytsenko/io.swagger.petstore3.testing
|
81a0a16d574d0c0664b297e7ba7ff2bb5a9a0c40
|
[
"MIT"
] | null | null | null |
petstore/api/api_response.py
|
andrii-grytsenko/io.swagger.petstore3.testing
|
81a0a16d574d0c0664b297e7ba7ff2bb5a9a0c40
|
[
"MIT"
] | null | null | null |
from enum import Enum
| 25.222222 | 102 | 0.654919 |
0aa7bee18a6d9f952d21c773ce61328493a8b54b
| 7,503 |
py
|
Python
|
test/integration/component/test_browse_templates2.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 1,131 |
2015-01-08T18:59:06.000Z
|
2022-03-29T11:31:10.000Z
|
test/integration/component/test_browse_templates2.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 5,908 |
2015-01-13T15:28:37.000Z
|
2022-03-31T20:31:07.000Z
|
test/integration/component/test_browse_templates2.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 1,083 |
2015-01-05T01:16:52.000Z
|
2022-03-31T12:14:10.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.codes import PASS, FAILED, SUCCESS, XEN_SERVER
from marvin.sshClient import SshClient
import requests
requests.packages.urllib3.disable_warnings()
import random
import string
import telnetlib
import os
import urllib.request, urllib.parse, urllib.error
import time
import tempfile
_multiprocess_shared_ = True
| 35.060748 | 144 | 0.620552 |
0aa818d9912fa4e7124c341cc827cf2ddf2f640c
| 11,501 |
py
|
Python
|
tests/components/ozw/test_websocket_api.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1 |
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/ozw/test_websocket_api.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47 |
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/ozw/test_websocket_api.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Test OpenZWave Websocket API."""
from unittest.mock import patch
from openzwavemqtt.const import (
ATTR_CODE_SLOT,
ATTR_LABEL,
ATTR_OPTIONS,
ATTR_POSITION,
ATTR_VALUE,
ValueType,
)
from openpeerpower.components.ozw.const import ATTR_CONFIG_PARAMETER
from openpeerpower.components.ozw.lock import ATTR_USERCODE
from openpeerpower.components.ozw.websocket_api import (
ATTR_IS_AWAKE,
ATTR_IS_BEAMING,
ATTR_IS_FAILED,
ATTR_IS_FLIRS,
ATTR_IS_ROUTING,
ATTR_IS_SECURITYV1,
ATTR_IS_ZWAVE_PLUS,
ATTR_NEIGHBORS,
ATTR_NODE_BASIC_STRING,
ATTR_NODE_BAUD_RATE,
ATTR_NODE_GENERIC_STRING,
ATTR_NODE_QUERY_STAGE,
ATTR_NODE_SPECIFIC_STRING,
ID,
NODE_ID,
OZW_INSTANCE,
PARAMETER,
SCHEMA,
TYPE,
VALUE,
)
from openpeerpower.components.websocket_api.const import (
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
)
from .common import MQTTMessage, setup_ozw
| 29.795337 | 87 | 0.629858 |
0aaa20d8b1879b4c1bc74cd5f86f6df85b43a7e8
| 30,173 |
py
|
Python
|
tests/test_formatters.py
|
samueljacques-qc/notification-utils
|
77f09cb2633ea5938a28ed50c21c7ae5075da7f2
|
[
"MIT"
] | null | null | null |
tests/test_formatters.py
|
samueljacques-qc/notification-utils
|
77f09cb2633ea5938a28ed50c21c7ae5075da7f2
|
[
"MIT"
] | null | null | null |
tests/test_formatters.py
|
samueljacques-qc/notification-utils
|
77f09cb2633ea5938a28ed50c21c7ae5075da7f2
|
[
"MIT"
] | null | null | null |
import pytest
from flask import Markup
from notifications_utils.formatters import (
unlink_govuk_escaped,
notify_email_markdown,
notify_letter_preview_markdown,
notify_plain_text_email_markdown,
sms_encode,
formatted_list,
strip_dvla_markup,
strip_pipes,
escape_html,
remove_whitespace_before_punctuation,
make_quotes_smart,
replace_hyphens_with_en_dashes,
tweak_dvla_list_markup,
nl2li,
strip_whitespace,
strip_and_remove_obscure_whitespace,
remove_smart_quotes_from_email_addresses,
strip_unsupported_characters,
normalise_whitespace
)
from notifications_utils.template import (
HTMLEmailTemplate,
PlainTextEmailTemplate,
SMSMessageTemplate,
SMSPreviewTemplate
)
def test_handles_placeholders_in_urls():
assert notify_email_markdown(
"http://example.com/?token=<span class='placeholder'>((token))</span>&key=1"
) == (
'<p style="Margin: 0 0 20px 0; font-size: 11pt; line-height: 25px; color: #0B0C0C;">'
'<a style="word-wrap: break-word; color: #005ea5;" href="http://example.com/?token=">'
'http://example.com/?token='
'</a>'
'<span class=\'placeholder\'>((token))</span>&key=1'
'</p>'
)
def test_sms_preview_adds_newlines():
template = SMSPreviewTemplate({'content': """
the
quick
brown fox
"""})
template.prefix = None
template.sender = None
assert '<br>' in str(template)
def test_footnotes():
# Cant work out how to test this
pass
def test_unicode_dash_lookup():
en_dash_replacement_sequence = '\u0020\u2013'
hyphen = '-'
en_dash = ''
space = ' '
non_breaking_space = ''
assert en_dash_replacement_sequence == space + en_dash
assert non_breaking_space not in en_dash_replacement_sequence
assert hyphen not in en_dash_replacement_sequence
def test_strip_and_remove_obscure_whitespace_only_removes_normal_whitespace_from_ends():
sentence = ' words \n over multiple lines with \ttabs\t '
assert strip_and_remove_obscure_whitespace(sentence) == 'words \n over multiple lines with \ttabs'
def test_remove_smart_quotes_from_email_addresses():
assert remove_smart_quotes_from_email_addresses("""
line ones quote
[email protected] is someones email address
line three
""") == ("""
line ones quote
first.o'[email protected] is someones email address
line three
""")
def test_strip_unsupported_characters():
assert strip_unsupported_characters("line one\u2028line two") == ("line oneline two")
def test_normalise_whitespace():
assert normalise_whitespace('\u200C Your tax is\ndue\n\n') == 'Your tax is due'
| 28.094041 | 190 | 0.545388 |
0aaa92e8b56443a2b167621484f9881042d7391b
| 983 |
py
|
Python
|
ProgramFlow/functions/banner.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
ProgramFlow/functions/banner.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
ProgramFlow/functions/banner.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
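# The original banner_text definition was not preserved in this dump. A minimal
# hedged sketch, consistent with the calls below (80-column width is assumed):
def banner_text(text):
    screen_width = 80
    if len(text) > screen_width - 4:
        raise ValueError("text is too long to fit inside the banner")
    if text == "*":
        # a lone "*" argument prints a full border line
        print("*" * screen_width)
    else:
        # centre the text between ** markers on each side
        print("**{}**".format(text.center(screen_width - 4)))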
banner_text("*")
banner_text("Always look on the bright side of life...")
banner_text("If life seems jolly rotten,")
banner_text("There's something you've forgotten!")
banner_text("And that's to laugh and smile and dance and sing,")
banner_text(" ")
banner_text("When you're feeling in the dumps,")
banner_text("Don't be silly chumps,")
banner_text("Just purse your lips and whistle - that's the thing!")
banner_text("And... always look on the bright side of life...")
banner_text("*")
result = banner_text("Nothing is returned")
print(result)
numbers = [4, 2, 7, 5, 8, 3, 9, 6, 1]
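# note: list.sort() sorts in place and returns None, so the line below prints "None"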
print(numbers.sort())
| 30.71875 | 67 | 0.66531 |
0aab53b2cca857c20d172807e1c32d755b133366
| 153 |
py
|
Python
|
Adafruit_BluefruitLE/interfaces/__init__.py
|
acoomans/Adafruit_Python_BluefruitLE
|
34fc6f596371b961628369d78ce836950514062f
|
[
"MIT"
] | 415 |
2015-08-19T00:07:10.000Z
|
2022-03-14T13:35:45.000Z
|
Adafruit_BluefruitLE/interfaces/__init__.py
|
acoomans/Adafruit_Python_BluefruitLE
|
34fc6f596371b961628369d78ce836950514062f
|
[
"MIT"
] | 51 |
2015-09-30T14:42:01.000Z
|
2020-11-02T21:12:26.000Z
|
Adafruit_BluefruitLE/interfaces/__init__.py
|
acoomans/Adafruit_Python_BluefruitLE
|
34fc6f596371b961628369d78ce836950514062f
|
[
"MIT"
] | 174 |
2015-10-06T07:16:51.000Z
|
2022-03-14T13:35:50.000Z
|
from .provider import Provider
from .adapter import Adapter
from .device import Device
from .gatt import GattService, GattCharacteristic, GattDescriptor
| 30.6 | 65 | 0.843137 |
0aab7620f824873c7b572e13e03aa334f91e254d
| 143 |
py
|
Python
|
axju/generic/__init__.py
|
axju/axju
|
de0b3d9c63b7cca4ed16fb50e865c159b4377953
|
[
"MIT"
] | null | null | null |
axju/generic/__init__.py
|
axju/axju
|
de0b3d9c63b7cca4ed16fb50e865c159b4377953
|
[
"MIT"
] | null | null | null |
axju/generic/__init__.py
|
axju/axju
|
de0b3d9c63b7cca4ed16fb50e865c159b4377953
|
[
"MIT"
] | null | null | null |
from axju.generic.basic import BasicWorker
from axju.generic.execution import ExecutionWorker
from axju.generic.template import TemplateWorker
| 35.75 | 50 | 0.874126 |
0aab9dbbc4006ac10614eb6e13f1101929dde5bc
| 5,242 |
py
|
Python
|
objectstoreSiteMover.py
|
nikmagini/pilot
|
1c84fcf6f7e43b669d2357326cdbe06382ac829f
|
[
"Apache-2.0"
] | 13 |
2015-02-19T17:17:10.000Z
|
2021-12-22T06:48:02.000Z
|
objectstoreSiteMover.py
|
nikmagini/pilot
|
1c84fcf6f7e43b669d2357326cdbe06382ac829f
|
[
"Apache-2.0"
] | 85 |
2015-01-06T15:01:51.000Z
|
2018-11-29T09:03:35.000Z
|
objectstoreSiteMover.py
|
nikmagini/pilot
|
1c84fcf6f7e43b669d2357326cdbe06382ac829f
|
[
"Apache-2.0"
] | 22 |
2015-06-09T12:08:29.000Z
|
2018-11-20T10:07:01.000Z
|
#!/usr/bin/env python
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, <[email protected]>, 2014
# objectstoreSiteMover.py
import os
from configSiteMover import config_sm
import SiteMover
from xrootdObjectstoreSiteMover import xrootdObjectstoreSiteMover
from S3ObjectstoreSiteMover import S3ObjectstoreSiteMover
if __name__ == '__main__':
os.environ['PilotHomeDir'] = os.getcwd()
from SiteInformation import SiteInformation
s1 = SiteInformation()
#s1.getObjectstoresField("os_access_key", "eventservice", queuename='BNL_EC2W2_MCORE')
f = objectstoreSiteMover()
gpfn = "nonsens_gpfn"
lfn = "AOD.310713._000004.pool.root.1"
path = os.getcwd()
fsize = "4261010441"
fchecksum = "9145af38"
dsname = "data11_7TeV.00177986.physics_Egamma.merge.AOD.r2276_p516_p523_tid310713_00"
report = {}
#print f.getGlobalFilePaths(dsname)
#print f.findGlobalFilePath(lfn, dsname)
#print f.getLocalROOTSetup()
#path = "root://atlas-objectstore.cern.ch//atlas/eventservice/2181626927" # + your .root filename"
"""
source = "/bin/hostname"
dest = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
localSize = 17848
localChecksum = "89b93830"
print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
gpfn = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/NTUP_PHOTON.01255150._000001.root.1"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
tmpDir = "/tmp/"
localSize = 17848
localChecksum = "89b93830"
print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
"""
# test S3 object store
source = "/bin/hostname"
#dest = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
dest = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
localSize = None
localChecksum = None
print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='')
gpfn = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
gpfn = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/NTUP_PHOTON.01255150._000001.root.1"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
tmpDir = "/tmp/"
localSize = None
localChecksum = None
print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='deb05b9fb5034a45b80c03bd671359c9')
| 44.803419 | 256 | 0.702404 |
0aac44d185f9607658c52f2deb96d4cdd7259f28
| 5,384 |
py
|
Python
|
codigos_videos/Exemplo_2.py
|
Miguel-mmf/Biblioteca_Dash_em-Python
|
63d268f568c02bc9b6c73e1f52ade2475ffbb3c5
|
[
"MIT"
] | 1 |
2022-03-17T13:55:33.000Z
|
2022-03-17T13:55:33.000Z
|
codigos_videos/Exemplo_2.py
|
Miguel-mmf/Biblioteca_Dash_em-Python
|
63d268f568c02bc9b6c73e1f52ade2475ffbb3c5
|
[
"MIT"
] | null | null | null |
codigos_videos/Exemplo_2.py
|
Miguel-mmf/Biblioteca_Dash_em-Python
|
63d268f568c02bc9b6c73e1f52ade2475ffbb3c5
|
[
"MIT"
] | 1 |
2020-12-12T21:56:06.000Z
|
2020-12-12T21:56:06.000Z
|
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This file contains some modifications relative to the file presented in the YouTube video
# Be sure to watch the video and to study the official Dash documentation
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# importing the required libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
# importing the callback helper functions from the dash.dependencies subpackage
from dash.dependencies import Input, Output
# importing the graph_objects module from the plotly library
import plotly.graph_objects as go
# adding an external stylesheet through the link below
# this is the link recommended by the Dash documentation; opening it in a
# browser shows that it has the structure of a CSS file
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# creating the application through the Dash function of the dash package and
# assigning it to the app variable
app = dash.Dash(
__name__,
external_stylesheets=external_stylesheets
)
# creating a function to generate a figure with the plotly.graph_objects module
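# (hedged sketch added here: the original function body was not preserved in
# this dump; the name `build_figure` and the sample data are illustrative)
def build_figure(kind):
    x = [1, 2, 3, 4]
    y = [10, 11, 12, 13]
    if kind == 'bar':
        return go.Figure(go.Bar(x=x, y=y))
    mode = 'lines' if kind == 'line' else 'lines+markers'
    return go.Figure(go.Scatter(x=x, y=y, mode=mode))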
# creating a layout for the app variable
# adding to the layout an html.Div component that will hold the other components
app.layout = html.Div([
    # inserting a Dash HTML Components element as the layout's title/header
    html.H2(
        ['Graph Visualization Panel'],
        # the style parameter defines CSS styles for the component
        style={
            'textAlign':'center', # centred text
            'font-weight':'bold' # bold text
        }
    ),
    # adding a horizontal rule to the layout
    html.Hr(),
    # creating the parent tabs inside the layout
    dcc.Tabs(
        # the component's id/name
        id='tabs',
        # creating the child tabs inside the children parameter of the Tabs() function
        children=[
            dcc.Tab(label='Line Chart',value='tab-1'),
            dcc.Tab(label='Bar Chart',value='tab-2'),
            dcc.Tab(label='Line and Marker Chart',value='tab-3')
        ]
    ),
    # where the tab content is rendered once the callback fires
    html.Div(id='tabs-content'),
    html.Hr(),
])
# Callback
# structuring the callback with its inputs (Input) and outputs (Output)
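# (hedged sketch added here: the original callback body was not preserved in
# this dump; it re-uses the illustrative build_figure defined above)
@app.callback(Output('tabs-content', 'children'), [Input('tabs', 'value')])
def render_tab(tab):
    kinds = {'tab-1': 'line', 'tab-2': 'bar', 'tab-3': 'line+markers'}
    return dcc.Graph(figure=build_figure(kinds.get(tab, 'line')))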
# serving the dash application in debug mode for testing
if __name__ == "__main__":
app.run_server(debug=True)
| 32.630303 | 202 | 0.610513 |
0aacd880ac180e15a9b5e161088e8e7de26eb77d
| 26,616 |
py
|
Python
|
Lab 2/javaccflab/formatter.py
|
tochanenko/MetaProgramming
|
d37f21432483e39e135fd0dc4f8767836eea1609
|
[
"MIT"
] | null | null | null |
Lab 2/javaccflab/formatter.py
|
tochanenko/MetaProgramming
|
d37f21432483e39e135fd0dc4f8767836eea1609
|
[
"MIT"
] | null | null | null |
Lab 2/javaccflab/formatter.py
|
tochanenko/MetaProgramming
|
d37f21432483e39e135fd0dc4f8767836eea1609
|
[
"MIT"
] | null | null | null |
import re
import datetime
from javaccflab.lexer import parse
from javaccflab.java_token import TokenType, Token, update_token_value
| 42.449761 | 184 | 0.527916 |
0aad63892e2757c199be78dfebc46a66c8e7becf
| 3,783 |
py
|
Python
|
src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 63 |
2020-04-20T16:31:16.000Z
|
2022-03-29T01:05:35.000Z
|
src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 5 |
2020-04-21T11:31:39.000Z
|
2022-03-24T13:42:56.000Z
|
src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py
|
zangobot/secml
|
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
|
[
"Apache-2.0"
] | 8 |
2020-04-21T09:16:42.000Z
|
2022-02-23T16:28:43.000Z
|
"""
.. module:: CAttackEvasionPGDExp
:synopsis: Evasion attack using Projected Gradient Descent.
.. moduleauthor:: Battista Biggio <[email protected]>
"""
from secml.adv.attacks.evasion import CAttackEvasionPGDLS
| 36.375 | 88 | 0.649749 |
0aaf87f9c4ecb098ef160db1b17bb991d9edaacc
| 3,172 |
py
|
Python
|
mail_log_parser/data_manager.py
|
kinteriq/mail-log-parser
|
e4242387c1767db611e266d463c817aeb8a74377
|
[
"MIT"
] | null | null | null |
mail_log_parser/data_manager.py
|
kinteriq/mail-log-parser
|
e4242387c1767db611e266d463c817aeb8a74377
|
[
"MIT"
] | null | null | null |
mail_log_parser/data_manager.py
|
kinteriq/mail-log-parser
|
e4242387c1767db611e266d463c817aeb8a74377
|
[
"MIT"
] | null | null | null |
import sqlite3
| 38.216867 | 80 | 0.596154 |
0ab07fb42baef7b3a437132ef3d9c03a2ec1e478
| 561 |
py
|
Python
|
Util/training_util.py
|
lychenyoko/content-aware-gan-compression
|
fa4193df630dd7b0e7fc52dd60669d8e1aefc39d
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 47 |
2021-07-04T14:51:38.000Z
|
2022-03-17T07:02:06.000Z
|
Util/training_util.py
|
lychenyoko/content-aware-gan-compression
|
fa4193df630dd7b0e7fc52dd60669d8e1aefc39d
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 9 |
2021-04-10T08:32:08.000Z
|
2022-02-21T03:14:40.000Z
|
Util/training_util.py
|
lychenyoko/content-aware-gan-compression
|
fa4193df630dd7b0e7fc52dd60669d8e1aefc39d
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 7 |
2021-07-02T08:11:55.000Z
|
2022-01-12T18:06:40.000Z
|
import math
| 33 | 83 | 0.68984 |
0ab1197213ef76c98f29e3e63d45d1418b5c01ea
| 583 |
py
|
Python
|
app/configs/development_settings.py
|
DIS-SIN/FlaskShell
|
5f6d0cfeac8bea0b274d16a497e3a20cd00b155a
|
[
"CC0-1.0"
] | null | null | null |
app/configs/development_settings.py
|
DIS-SIN/FlaskShell
|
5f6d0cfeac8bea0b274d16a497e3a20cd00b155a
|
[
"CC0-1.0"
] | null | null | null |
app/configs/development_settings.py
|
DIS-SIN/FlaskShell
|
5f6d0cfeac8bea0b274d16a497e3a20cd00b155a
|
[
"CC0-1.0"
] | null | null | null |
######################################################## FLASK SETTINGS ##############################################################
#Variable used to securely sign cookies
##THIS IS SET IN DEV ENVIRONMENT FOR CONVENIENCE BUT SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PROD
SECRET_KEY = "dev"
######################################################## DATABASE SETTINGS ####################################################
#Neo4j Database URI used by the Neomodel OGM
## THIS SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PRODUCTION ##
DATABASE_URI = "bolt://test:test@localhost:7687"
| 58.3 | 134 | 0.476844 |
0ab246f495c8f138c1a41820ece75a23cb6ba83c
| 37,122 |
py
|
Python
|
autoarray/structures/grids/two_d/grid_2d_util.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
autoarray/structures/grids/two_d/grid_2d_util.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
autoarray/structures/grids/two_d/grid_2d_util.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | null | null | null |
import numpy as np
from typing import Tuple, Union, Optional
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.geometry import geometry_util
from autoarray import numba_util
from autoarray.mask import mask_2d_util
def grid_2d_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are
given values (0.0, 0.0).
Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
mask = np.array([[True, False, True],
[False, False, False]
[True, False, True]])
grid_2d = grid_2d_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
grid_2d_slim = grid_2d_slim_via_mask_from(
mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin
)
return grid_2d_native_from(
grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size
)
def grid_2d_slim_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grid2D are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
        A sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel of the 2D mask
array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
mask = np.array([[True, False, True],
                     [False, False, False],
[True, False, True]])
grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_slim_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
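# Usage sketch: every pixel contributes sub_size**2 sub-pixel centres, so a
# (2, 2) shape with sub_size=2 should yield a slimmed grid of shape
# (2 * 2 * 4, 2) = (16, 2).
#
#   grid_slim = grid_2d_slim_via_shape_native_from(
#       shape_native=(2, 2), pixel_scales=(0.5, 0.5), sub_size=2
#   )
#   # grid_slim.shape == (16, 2)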
def grid_2d_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided
into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes
the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
        A sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel of the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
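# Usage sketch: the native variant keeps the 2D layout, so a (3, 3) shape with
# sub_size=2 should come back as a (6, 6, 2) array of sub-pixel centres.
#
#   grid = grid_2d_via_shape_native_from(
#       shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2
#   )
#   # grid.shape == (6, 6, 2)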
def grid_2d_slim_from(
grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int
) -> np.ndarray:
"""
    For a native 2D grid of shape [total_y_pixels, total_x_pixels, 2] and a 2D mask of shape
    [total_y_pixels, total_x_pixels], map the values of all unmasked pixels to a slimmed grid of
    shape [total_unmasked_pixels, 2].
    The pixel coordinate origin is at the top left corner of the native grid, with indexing running
    rightwards and then downwards, such that for a grid of shape (3,3) where all pixels are unmasked:
- pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid.
- pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid.
- pixel [1,0] of the 2D grid will correspond to index 4 of the 1D grid.
Parameters
----------
grid_2d_native : ndarray
The native grid of (y,x) values which are mapped to the slimmed grid.
    mask
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.
Returns
-------
ndarray
        A slimmed grid of (y,x) values mapped from the native 2D grid, with dimensions (total_unmasked_pixels, 2).
"""
grid_1d_slim_y = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size
)
grid_1d_slim_x = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size
)
return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1)
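# Usage sketch: only unmasked (False) entries survive the slimming, so a 2x2
# grid with one masked pixel maps to shape (3, 2) at sub_size=1.
#
#   mask = np.array([[True, False], [False, False]])
#   grid_native = grid_2d_via_mask_from(mask_2d=mask, pixel_scales=(1.0, 1.0), sub_size=1)
#   grid_slim = grid_2d_slim_from(grid_2d_native=grid_native, mask=mask, sub_size=1)
#   # grid_slim.shape == (3, 2)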
def grid_2d_native_from(
grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values
from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to the
native 2D grid where masked values are set to zero.
This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked
pixels, for example:
    - If slim_to_native[0] = [0,0], the first value of the 1D array maps to pixel [0,0,:] of the native 2D grid.
    - If slim_to_native[1] = [0,1], the second value of the 1D array maps to pixel [0,1,:] of the native 2D grid.
    - If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to pixel [1,1,:] of the native 2D grid.
Parameters
----------
grid_2d_slim
The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid.
mask_2d
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.
Returns
-------
ndarray
        A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native 2D
        grid mapped from the slimmed grid.
"""
grid_2d_native_y = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size
)
grid_2d_native_x = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size
)
return np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)
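# Round-trip sketch: slimming a native grid and mapping it back should
# reproduce the original, with masked pixels set to (0.0, 0.0).
#
#   mask = np.array([[True, False], [False, False]])
#   grid_native = grid_2d_via_mask_from(mask_2d=mask, pixel_scales=(1.0, 1.0), sub_size=1)
#   grid_slim = grid_2d_slim_from(grid_2d_native=grid_native, mask=mask, sub_size=1)
#   grid_native_again = grid_2d_native_from(grid_2d_slim=grid_slim, mask_2d=mask, sub_size=1)
#   # np.array_equal(grid_native, grid_native_again) should hold here, since the
#   # masked entries of grid_native are already (0.0, 0.0).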
| 39.787781 | 130 | 0.645574 |
0ab2e129e7612f7fdafee8257f8411edf7808187
| 2,689 |
py
|
Python
|
Proxies/Proxies.py
|
crown-prince/proxies
|
a3342d414675dbc89cdf1b953b46ea518f451166
|
[
"MIT"
] | 2 |
2018-08-28T06:34:16.000Z
|
2018-12-05T01:33:33.000Z
|
Proxies/Proxies.py
|
crown-prince/proxies
|
a3342d414675dbc89cdf1b953b46ea518f451166
|
[
"MIT"
] | null | null | null |
Proxies/Proxies.py
|
crown-prince/proxies
|
a3342d414675dbc89cdf1b953b46ea518f451166
|
[
"MIT"
] | 3 |
2017-11-23T03:16:49.000Z
|
2019-05-05T05:23:57.000Z
|
# coding: utf-8
import requests, math
import gevent
from gevent.queue import Queue
from gevent import monkey; monkey.patch_all()
from pyquery import PyQuery
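# Minimal placeholder sketch of the Proxies interface exercised below; this is
# an assumption for illustration, not the original implementation (which
# presumably scrapes a proxy-list site with requests + PyQuery and checks the
# candidates concurrently with gevent). Only the methods used in __main__ are
# stubbed: get_proxies(num, option) gathers `num` items via greenlets and
# get_result() returns whatever was collected.
class Proxies:
    def __init__(self):
        self.result = []
        self.queue = Queue()
    def get_proxies(self, num, option):
        # Spawn one greenlet per candidate and wait for them all; a real
        # implementation would fetch and parse proxy-list pages here.
        jobs = [gevent.spawn(self.queue.put, i) for i in range(num)]
        gevent.joinall(jobs)
        while not self.queue.empty():
            self.result.append(self.queue.get())
    def get_result(self):
        return self.result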
if __name__ == '__main__':
p = Proxies()
p.get_proxies(20, 1)
result = p.get_result()
print(result)
| 32.39759 | 150 | 0.531424 |
0ab39451258fbf9b0748574dd450aedbe38e6382
| 21,092 |
py
|
Python
|
parallelformers/policies/base/auto.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 454 |
2021-07-18T02:51:23.000Z
|
2022-03-31T04:00:53.000Z
|
parallelformers/policies/base/auto.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 16 |
2021-07-18T10:47:21.000Z
|
2022-03-22T18:49:57.000Z
|
parallelformers/policies/base/auto.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 33 |
2021-07-18T04:48:28.000Z
|
2022-03-14T22:16:36.000Z
|
# Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import suppress
from typing import List, Union
from torch import nn
from parallelformers.policies.base import Policy
| 30.836257 | 88 | 0.585815 |
0ab472bf8d8e22693d678c504a9b881ed31f9478
| 3,042 |
py
|
Python
|
main/upper_air_humidity.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
main/upper_air_humidity.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
main/upper_air_humidity.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Name: upper_air_humidity.py
Make an upper-level weather chart.
Usage: python3 upper_air_humidity.py --file <ncfile>
Author: Ryosuke Tomita
Date: 2022/01/07
"""
import argparse
from ncmagics import fetchtime, japanmap, meteotool
def parse_args() -> dict:
"""parse_args.
set file path.
Args:
Returns:
dict:
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="set ncfile.", type=str)
p = parser.parse_args()
args = {"file": p.file}
return args
def output_name(ncfile: str, isobaric_surface: int) -> str:
"""output_name.
Args:
ncfile (str): ncfile
isobaric_surface (int): isobaric_surface
Returns:
str:
"""
date_time = fetchtime.fetch_time(ncfile)
outname = (date_time + "_" + str(isobaric_surface))
return outname
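# For example, assuming fetch_time returns a stamp such as "2022010712",
# output_name("gsm.nc", 850) would yield "2022010712_850" (the filename here
# is hypothetical).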
def main():
"""main.
"""
args = parse_args()
meteo_tool = meteotool.MeteoTools(args["file"])
lat, lon = meteo_tool.get_lat_lon()
isobaric_surface = (850, 500, 300)
#label_upper = (30, 0)
#lebel_min = (-30, -60)
for i, pressure in enumerate(isobaric_surface):
# get parameter
temp_c = meteo_tool.get_parameter('t', isobaric_surface=pressure) - 273.15
rh = meteo_tool.get_parameter('r', isobaric_surface=pressure)
height_gpm = meteo_tool.get_parameter('gh', isobaric_surface=pressure)
u_wind = meteo_tool.get_parameter('u', isobaric_surface=pressure)
v_wind = meteo_tool.get_parameter('v', isobaric_surface=pressure)
jp_map = japanmap.JpMap()
jp_map.contour_plot(lon, lat, height_gpm)
#jp_map.shade_plot(lon, lat, temp_c,
# label="2m temperature ($^\circ$C)",
# color_bar_label_max=label_upper[i],
# color_bar_label_min=lebel_min[i],
# color_map_type="temperature",
# double_color_bar=True,)
jp_map.shade_plot(lon, lat, rh,
label="relative humidity (%)",
color_bar_label_max=100,
color_bar_label_min=0,
color_map_type="gray",
double_color_bar=False,)
jp_map.vector_plot(lon, lat, u_wind, v_wind,
vector_interval=5, vector_scale=10, mode="wind")
#jp_map.gray_shade(lon, lat, rh,
# label="relative humidity (%)",
# color_bar_label_max=100,
# color_bar_label_min=0,
# )
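        # Interpretive note (not from the original source): the -6 degC line at
        # 850 hPa and the -36 degC line at 500 hPa are commonly used cold-air /
        # snowfall thresholds in Japanese weather analysis.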
if pressure == 850:
jp_map.color_line(lon, lat, temp_c, line_value=-6, color='#0000ff')
if pressure == 500:
jp_map.color_line(lon, lat, temp_c, line_value=-36, color='#b22222')
outname = output_name(args["file"], pressure)
print(outname)
jp_map.save_fig(outname, str(pressure) + "hPa")
if __name__ == "__main__":
main()
| 31.040816 | 82 | 0.575608 |
0ab496b4beb92ca3fe52c60cfcbb81b2b17b5de3
| 22,976 |
py
|
Python
|
serde/src/gen/thrift/gen-py/megastruct/ttypes.py
|
amCharlie/hive
|
e1870c190188a3b706849059969c8bec2220b6d2
|
[
"Apache-2.0"
] | 2 |
2021-04-24T08:07:45.000Z
|
2021-04-24T08:07:46.000Z
|
serde/src/gen/thrift/gen-py/megastruct/ttypes.py
|
amCharlie/hive
|
e1870c190188a3b706849059969c8bec2220b6d2
|
[
"Apache-2.0"
] | 14 |
2020-12-26T22:01:38.000Z
|
2022-02-09T22:41:46.000Z
|
serde/src/gen/thrift/gen-py/megastruct/ttypes.py
|
amCharlie/hive
|
e1870c190188a3b706849059969c8bec2220b6d2
|
[
"Apache-2.0"
] | 7 |
2021-08-16T07:49:24.000Z
|
2022-03-17T09:04:34.000Z
|
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
all_structs.append(MiniStruct)
MiniStruct.thrift_spec = (
None, # 0
(1, TType.STRING, 'my_string', 'UTF8', None, ), # 1
(2, TType.I32, 'my_enum', None, None, ), # 2
)
all_structs.append(MegaStruct)
MegaStruct.thrift_spec = (
None, # 0
(1, TType.BOOL, 'my_bool', None, None, ), # 1
(2, TType.BYTE, 'my_byte', None, None, ), # 2
(3, TType.I16, 'my_16bit_int', None, None, ), # 3
(4, TType.I32, 'my_32bit_int', None, None, ), # 4
(5, TType.I64, 'my_64bit_int', None, None, ), # 5
(6, TType.DOUBLE, 'my_double', None, None, ), # 6
(7, TType.STRING, 'my_string', 'UTF8', None, ), # 7
(8, TType.STRING, 'my_binary', 'BINARY', None, ), # 8
(9, TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 9
(10, TType.MAP, 'my_string_enum_map', (TType.STRING, 'UTF8', TType.I32, None, False), None, ), # 10
(11, TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False), None, ), # 11
(12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None, ), # 12
(13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None, TType.LIST, (TType.STRING, 'UTF8', False), False), None, ), # 13
(14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None, ), # 14
(15, TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None, ), # 15
(16, TType.LIST, 'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None, ), # 16
(17, TType.LIST, 'my_enumlist', (TType.I32, None, False), None, ), # 17
(18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ), # 18
(19, TType.SET, 'my_enumset', (TType.I32, None, False), None, ), # 19
(20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ), # 20
)
fix_spec(all_structs)
del all_structs
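# Construction sketch (an illustration, assuming the standard Thrift Python
# code generator, whose struct classes accept their fields as keyword
# arguments):
#   ms = MiniStruct(my_string="abc", my_enum=1)
#   mega = MegaStruct(my_bool=True, my_string_string_map={"k": "v"})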
| 43.680608 | 430 | 0.550531 |