| Column | Type | Observed range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
374c960f285d4baaf8c9ce3b8205ea9135cd46b5
|
d571d407cfda435fcab8b7ccadb1be812c7047c7
|
/guild/tests/samples/projects/flags-dest/submod.py
|
6416966dcb06abb15601a3c47796545306d1ac5c
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
guildai/guildai
|
2d8661a2a6bf0d1ced6334095c8bf5a8e391d8af
|
149055da49f57eaf4aec418f2e339c8905c1f02f
|
refs/heads/main
| 2023-08-25T10:09:58.560059 | 2023-08-12T20:19:05 | 2023-08-12T20:19:05 | 105,057,392 | 833 | 86 |
Apache-2.0
| 2023-08-07T19:34:27 | 2017-09-27T18:57:50 |
Python
|
UTF-8
|
Python
| false | false | 169 |
py
|
import argparse
p = argparse.ArgumentParser()
p.add_argument("--bar", default=456)
if __name__ == "__main__":
    args = p.parse_args()
    print("bar: %s" % args.bar)
|
[
"[email protected]"
] | |
4be16408f688e6b5cb887f4a18ae62b9a56fd20a
|
af3ec207381de315f4cb6dddba727d16d42d6c57
|
/dialogue-engine/test/programytest/parser/template/node_tests/richmedia_tests/test_carousel.py
|
00edd17c4ac2091155b647b8e43f064c5c9e3f10
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mcf-yuichi/cotoba-agent-oss
|
02a5554fe81ce21517f33229101013b6487f5404
|
ce60833915f484c4cbdc54b4b8222d64be4b6c0d
|
refs/heads/master
| 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,968 |
py
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.richmedia.carousel import TemplateCarouselNode
from programy.parser.template.nodes.richmedia.card import TemplateCardNode
from programy.parser.template.nodes.richmedia.button import TemplateButtonNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class TemplateCarouselNodeTests(ParserTestsBaseClass):
    def test_carousel_node(self):
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        carousel = TemplateCarouselNode()
        card = TemplateCardNode()
        card._image = TemplateWordNode("http://Servusai.com")
        card._title = TemplateWordNode("Servusai.com")
        card._subtitle = TemplateWordNode("The home of ProgramY")
        button = TemplateButtonNode()
        button._text = TemplateWordNode("More...")
        button._url = TemplateWordNode("http://Servusai.com/aiml")
        card._buttons.append(button)
        carousel._cards.append(card)
        root.append(carousel)
        resolved = root.resolve(self._client_context)
        self.assertIsNotNone(resolved)
        texts1 = "<carousel><card><image>http://Servusai.com</image><title>Servusai.com</title><subtitle>The home of ProgramY</subtitle>" + \
                 "<button><text>More...</text><url>http://Servusai.com/aiml</url></button></card></carousel>"
        self.assertEqual(texts1, resolved)
        texts2 = "<carousel><card><image>http://Servusai.com</image><title>Servusai.com</title><subtitle>The home of ProgramY</subtitle>" + \
                 "<button><text>More...</text><url>http://Servusai.com/aiml</url></button></card></carousel>"
        self.assertEqual(texts2, root.to_xml(self._client_context))
|
[
"[email protected]"
] | |
21c0443c59c65a77addd1fcc15d3b94c4b037acc
|
7884d75d9835493ff627d0468e94fe7a838a6aa1
|
/ocr_server/restapi/recognize.py
|
ff03aad6edbfd4381bcff7e6a1e274ae8b269893
|
[] |
no_license
|
fjibj/OCRServer
|
d2c7c5217046ffbec6f2affdd1c77379f9453d67
|
e23c23198fc89feb2f714faf2d022b1e21ac2151
|
refs/heads/master
| 2020-03-18T18:50:51.501629 | 2017-01-18T08:35:40 | 2017-01-18T08:35:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 817 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
CONFIG_PATH = './recognize/config.yaml'
CONFIG_DATA = {}
def loadConfig():
    global CONFIG_DATA
    f = open(CONFIG_PATH, encoding='utf-8')
    CONFIG_DATA = yaml.safe_load(f)
    f.close()
    for name in CONFIG_DATA:
        config = CONFIG_DATA[name]
        if 'option' not in config['feature']:
            config['feature']['option'] = {}
        if 'rotate' not in config:
            config['rotate'] = 'perspective'
        if 'validate' not in config:
            config['validate'] = {
                'roi': None
            }
        if 'roi' not in config['validate']:
            config['validate']['roi'] = None
        if 'roi' not in config:
            config['roi'] = {}
    return

def getConfig():
    return CONFIG_DATA
|
[
"="
] |
=
|
62fd7f83ae140306bbea3251e99b3e08f097a951
|
61dfa0ac80a6979d135e969b5b7b78a370c16904
|
/analysis/projections/get_projection_alpha_dm.py
|
7abcc8371fcb01ab5adfeceb45330d9e01e88595
|
[] |
no_license
|
bvillasen/cosmo_tools
|
574d84f9c18d92d2a9610d1d156113730d80f5a4
|
6bb54534f2242a15a6edcf696f29a3cf22edd342
|
refs/heads/master
| 2021-07-13T06:43:32.902153 | 2020-10-05T21:17:30 | 2020-10-05T21:17:30 | 207,036,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,967 |
py
|
import sys, os
import numpy as np
import h5py as h5
import matplotlib.pyplot as plt
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# import matplotlib.transforms as tfrms
# import matplotlib
# import matplotlib as mpl
# mpl.rcParams['savefig.pad_inches'] = 0
# import palettable.cmocean.sequential as colors
# list_of_colors = ['Algae', 'Amp', 'Deep', 'Dense', 'Gray', 'Haline', 'Ice',
# 'Matter', 'Oxy', 'Phase', 'Solar', 'Speed', 'Tempo', 'Thermal', 'Turbid']
cosmo_dir = os.path.dirname(os.path.dirname(os.getcwd())) + '/'
dataDir = cosmo_dir + 'data/'
subDirectories = [x[0] for x in os.walk(cosmo_dir)]
sys.path.extend(subDirectories)
from load_data_cholla import load_snapshot_data, load_snapshot_data_distributed, load_snapshot_data_distributed_periodix_x
from tools import *
from congrid import *
import scipy.ndimage
cosmo_dir = os.path.dirname(os.path.dirname(os.getcwd())) + '/'
subDirectories = [x[0] for x in os.walk(cosmo_dir)]
sys.path.extend(subDirectories)
from domain_decomposition import get_domain_block
from projection_functions import rescale_image, get_rescaled_image
def get_distance_factor( index, index_front, index_middle, index_b, middle_point):
    index_rescaled = (index) - index_middle
    middle_point = np.float(middle_point)
    slope = ( middle_point ) / index_middle - index_front
    if index_rescaled <= index_middle: index_rescaled = index_front + slope*index
    else: index_rescaled = index - index_middle + middle_point
    index_rescaled = np.float( index_rescaled)
    if index_rescaled < 1: index_rescaled = 1
    return index_rescaled**(-0.8)

def get_distance_factor_linear( index, index_front, index_b, value_back):
    value_front = 1.0
    slope = ( value_back - value_front ) / (index_b - index_front)
    distance_factor = value_front + slope * index
    return distance_factor

def get_transparency_factor_linear( indx, val_f, val_m, val_b, indx_f, indx_m0, indx_m1, indx_b, ):
    if indx <= indx_m0:
        slope = float(val_m - val_f) / ( indx_m0 - indx_f )
        factor = val_f + slope*indx
    elif indx <= indx_m1:
        factor = val_m
    else:
        slope = float(val_b - val_m) / ( indx_b - indx_m1 )
        factor = val_m + slope* (indx - indx_m1)
    return factor
dataDir = '/data/groups/comp-astro/bruno/'
# dataDir = '/gpfs/alpine/proj-shared/ast149/'
nPoints = 2048
# size_front = 5120
size_front =int ( 2048 * 1.4 )
size_back = int (2048 * 0.8 )
field = 'density'
inDir = dataDir + 'cosmo_sims/{0}_hydro_50Mpc/output_files_pchw18/'.format(nPoints)
if field == 'density': output_dir = dataDir + 'cosmo_sims/{0}_hydro_50Mpc/projections_pchw18/dm/projections_{1}_alpha_3/'.format(nPoints,size_front)
use_mpi = True
if use_mpi:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nprocs = comm.Get_size()
else:
    rank = 0
    nprocs = 1
nSnap = 169
if nprocs == 1: show_progess = True
else: show_progess = False
if rank == 0: show_progess = True
Lbox = 50000
proc_grid = [ 8, 8, 8]
box_size = [ Lbox, Lbox, Lbox ]
grid_size = [ 2048, 2048, 2048 ]
domain = get_domain_block( proc_grid, box_size, grid_size )
n_depth = 512
# n_per_run = 1
# index_start_range = range( index*n_per_run, (index+1)*n_per_run)
if rank == 0: create_directory( output_dir )
n_index_total = nPoints
n_proc_snaps= (n_index_total-1) // nprocs + 1
index_start_range = np.array([ rank + i*nprocs for i in range(n_proc_snaps) ])
index_start_range = index_start_range[ index_start_range < n_index_total ]
if len(index_start_range) == 0: exit()
if not use_mpi: index_start_range = [0]
print('Generating: {0} {1}\n'.format( rank, index_start_range))
data_type = 'particles'
indx_start = 0
for i, indx_start in enumerate(index_start_range):
    # if indx_start > 0: continue
    print("Index: {0}".format(indx_start))
    grid_complete_size = [ 2048, 2048, 2048 ]
    subgrid_x = [ indx_start, indx_start + n_depth ]
    subgrid_y = [ 0, 2048 ]
    subgrid_z = [ 0, 2048 ]
    subgrid = [ subgrid_x, subgrid_y, subgrid_z ]
    precision = np.float32
    data_snapshot = load_snapshot_data_distributed_periodix_x( nSnap, inDir, data_type, field, subgrid, domain, precision, proc_grid, grid_complete_size, show_progess=show_progess )
    if data_type == 'particles': current_z = data_snapshot['current_z']
    if data_type == 'hydro': current_z = data_snapshot['Current_z']
    data = data_snapshot[data_type][field]
    if field == 'density':
        clip_max = 2116267.2/10
        clip_min = 0
        data = np.clip(data, clip_min, clip_max)
    if show_progess: print('')
    size_original = ( nPoints, nPoints )
    size_all = np.linspace( size_front, size_back, n_depth).astype(np.int)
    size_output = np.array([2160, 3840 ])
    projection_color = np.zeros( size_output )
    projection_distance = np.zeros( size_output )
    projection_alpha = np.zeros( size_output )
    distance_factor_list = []
    for indx_x in range(n_depth):
        slice_original = data[indx_x]
        size_slice = size_all[indx_x]
        slice_rescaled = get_rescaled_image( slice_original, size_slice, size_output )
        transparency_factor = get_transparency_factor_linear( indx_x, 0.0, 1.0, 0.0, 0, 180, 256, n_depth)
        # transparency_factor = get_transparency_factor_linear( indx_x, 0.0, 1.0, 0.0, 0, 256, 256+128, n_depth)
        slice_masked = slice_rescaled.copy()
        min_dens_mask = 1
        slice_masked = np.clip( slice_masked, a_min=min_dens_mask, a_max=None)
        projection_alpha += np.log10(slice_masked) * transparency_factor**3
        distance_factor = (transparency_factor)**(2)
        projection_color += slice_rescaled
        projection_distance += slice_rescaled * distance_factor
        distance_factor_list.append(distance_factor)
        if show_progess:
            terminalString = '\r Slice: {0}/{1} distance_factor:{2} transparecy:{3}'.format(indx_x, n_depth, distance_factor, transparency_factor )
            sys.stdout.write(terminalString)
            sys.stdout.flush()
    if show_progess: print("")
    #Write the projection to a file:
    n_image = indx_start
    out_file_name = output_dir + 'projection_{2}_{3}_{0}_{1}.h5'.format( nSnap, n_image, data_type, field )
    out_file = h5.File( out_file_name, 'w')
    out_file.attrs['current_z'] = current_z
    group_type = out_file.create_group( data_type )
    group_field = group_type.create_group( field )
    data_set = group_field.create_dataset( 'color', data= projection_color )
    data_set.attrs['max'] = projection_color.max()
    data_set.attrs['min'] = projection_color.min()
    data_set = group_field.create_dataset( 'distance', data= projection_distance )
    data_set.attrs['max'] = projection_distance.max()
    data_set.attrs['min'] = projection_distance.min()
    data_set = group_field.create_dataset( 'alpha', data= projection_alpha )
    data_set.attrs['max'] = projection_alpha.max()
    data_set.attrs['min'] = projection_alpha.min()
    out_file.close()
    print("Saved File {0} / {1}: {2}\n".format(i, len(index_start_range), out_file_name ))
|
[
"[email protected]"
] | |
5b2179a2439730b1162882adb56bcf9da6062535
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2008.1/applications/games/simutrans-waste/actions.py
|
9fcfaccbd3668b1ec6905a88338d406b19946b22
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 627 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import pisitools
import os
def fixperms(d):
    for root, dirs, files in os.walk(d):
        for name in dirs:
            shelltools.chmod(os.path.join(root, name), 0755)
        for name in files:
            shelltools.chmod(os.path.join(root, name), 0644)

WorkDir = "simutrans"
NoStrip = "/"

def install():
    fixperms("pak")
    pisitools.insinto("/usr/share/simutrans/pak", "pak/*")
|
[
"[email protected]"
] | |
9ce8ba8ed02d447b630541ebffebfeb1900a1f8a
|
83c62d10899b4e5d7028915a9f727bcdec0f861c
|
/reports/tests.py
|
e868d5496f433f94b227b269b675d01567a51c08
|
[
"Apache-2.0"
] |
permissive
|
meetjitesh/cito_engine
|
1b70139d762a9540bc40d2544ff4c9884de4eef3
|
ae63ff1147fa1c536dc2673e873768e5624b40bb
|
refs/heads/master
| 2020-12-26T10:45:52.137856 | 2014-05-18T14:19:38 | 2014-05-18T14:19:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 618 |
py
|
"""Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.test import TestCase
# Create your tests here.
|
[
"[email protected]"
] | |
64217402f5ba7531d6d16c011e9923000e824b9a
|
919e74f05976d9ea5f28d5dcf0a3e9311a4d22b2
|
/conans/test/integration/cache/test_home_special_char.py
|
9c8ad39a70060a614aa24ffe500e1e6c0b312817
|
[
"MIT"
] |
permissive
|
thorsten-klein/conan
|
1801b021a66a89fc7d83e32100a6a44e98d4e567
|
7cf8f384b00ba5842886e39b2039963fc939b00e
|
refs/heads/develop
| 2023-09-01T12:04:28.975538 | 2023-07-26T10:55:02 | 2023-07-26T10:55:02 | 150,574,910 | 0 | 0 |
MIT
| 2023-08-22T14:45:06 | 2018-09-27T11:16:48 |
Python
|
UTF-8
|
Python
| false | false | 2,319 |
py
|
import os
import platform
import pytest
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient
import textwrap
_path_chars = "päthñç$"
@pytest.fixture(scope="module")
def client_with_special_chars():
    """ the path with special characters is creating a conanbuild.bat that fails
    """
    cache_folder = os.path.join(temp_folder(), _path_chars)
    current_folder = os.path.join(temp_folder(), _path_chars)
    c = TestClient(cache_folder, current_folder)
    tool = textwrap.dedent(r"""
        import os
        from conan import ConanFile
        from conan.tools.files import save, chdir
        class Pkg(ConanFile):
            name = "mytool"
            version = "1.0"
            def package(self):
                with chdir(self, self.package_folder):
                    echo = "@echo off\necho MYTOOL WORKS!!"
                    save(self, "bin/mytool.bat", echo)
                    save(self, "bin/mytool.sh", echo)
                    os.chmod("bin/mytool.sh", 0o777)
        """)
    c.save({"conanfile.py": tool})
    c.run("create .")
    conan_file = textwrap.dedent("""
        import platform
        from conan import ConanFile
        class App(ConanFile):
            name="failure"
            version="0.1"
            settings = 'os', 'arch', 'compiler', 'build_type'
            generators = "VirtualBuildEnv"
            tool_requires = "mytool/1.0"
            apply_env = False # SUPER IMPORTANT, DO NOT REMOVE
            def build(self):
                mycmd = "mytool.bat" if platform.system() == "Windows" else "mytool.sh"
                self.run(mycmd)
        """)
    c.save({"conanfile.py": conan_file})
    return c
def test_reuse_buildenv(client_with_special_chars):
    c = client_with_special_chars
    # Need the 2 profile to work correctly buildenv
    c.run("create . -s:b build_type=Release")
    assert _path_chars in c.out
    assert "MYTOOL WORKS!!" in c.out

@pytest.mark.skipif(platform.system() != "Windows", reason="powershell only win")
def test_reuse_buildenv_powershell(client_with_special_chars):
    c = client_with_special_chars
    c.run("create . -s:b build_type=Release -c tools.env.virtualenv:powershell=True")
    assert _path_chars in c.out
    assert "MYTOOL WORKS!!" in c.out
|
[
"[email protected]"
] | |
2fd3bcd0b1e0a265de56a4a6fcf68eb7015bb7eb
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_6/sngakh004/question4.py
|
64ef79e9395c406a90e97ab814688ab603b0c013
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 985 |
py
|
"""Akhil Singh
SNGAKH004
Program to draw a histogram of class mark data.
23 April 2014"""
#Get class marks from user
class_mark=input("Enter a space-separated list of marks:\n")
#Converting the string into a list of integers
class_mark = class_mark.split()
for a in range(len(class_mark)):
    class_mark[a] = eval(class_mark[a])

#Specifying accumulators for variables
first = 0
upper_second = 0
lower_second = 0
third = 0
fail = 0

#Determining the place of marks
for m in class_mark:
    if m >= 75:
        first = first + 1
    elif m >= 70:
        upper_second = upper_second + 1
    elif m >= 60:
        lower_second = lower_second + 1
    elif m >= 50:
        third = third + 1
    else:
        fail = fail + 1
#Print histogram.
print("1 |", "X"*first, sep = "")
print("2+|", "X"*upper_second, sep = "")
print("2-|", "X"*lower_second, sep = "")
print("3 |", "X"*third, sep = "")
print("F |", "X"*fail, sep = "")
|
[
"[email protected]"
] | |
4370fe568105d6947b4b17e170453c6a279fbf30
|
3f3471faa4a693fb18adb651500753f637ab1916
|
/book.py
|
65bdcf54cb5837e194d347af766617add40fcbf4
|
[] |
no_license
|
sarte3/pj5
|
1f7a54fc5a6e78a3e803416787ea32c3626761cd
|
afdb2dd49292c8647f9dec5a061a8d4f1bba06d7
|
refs/heads/master
| 2023-03-06T04:11:22.607225 | 2021-02-25T05:17:36 | 2021-02-25T05:17:36 | 342,131,195 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,384 |
py
|
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt

# Korean text handling
from matplotlib import font_manager, rc
fontname = font_manager.FontProperties(fname='malgun.ttf').get_name()
rc('font', family=fontname)

import json
import os
import sys
import urllib.request

client_id = "twrhEE4LU8HoKxIrMwzM"
client_secret = "hsPSocfNRK"
encText = urllib.parse.quote("파이썬")
url = "https://openapi.naver.com/v1/search/book.json?display=100&query=" + encText  # json result
# url = "https://openapi.naver.com/v1/search/blog.xml?query=" + encText  # xml result
request = urllib.request.Request(url)
request.add_header("X-Naver-Client-Id", client_id)
request.add_header("X-Naver-Client-Secret", client_secret)
response = urllib.request.urlopen(request)
rescode = response.getcode()
if(rescode==200):
    response_body = response.read()
    result = response_body.decode('utf-8')
else:
    print("Error Code:" + str(rescode))
dic = json.loads(result)
# print(dic)
# 1) Search for Python books via the Naver developer center API and
#    build a DataFrame df with the book title, publisher, price and isbn columns.
items = dic['items']
df = pd.DataFrame(items)
df = df[['title', 'publisher', 'price', 'isbn']]
print(df)
# 2) Print the average price per publisher
df['price'] = pd.to_numeric(df['price'])
g1 = df.groupby('publisher')['price'].mean()
print(g1)
|
[
"[email protected]"
] | |
e548f54d3a7c8da2d51938e1da6415835d9d5685
|
94f156b362fbce8f89c8e15cd7687f8af267ef08
|
/midterm/main/permissions.py
|
57ee4beb272ac8cfc9255ea149ac2f53b1a21476
|
[] |
no_license
|
DastanB/AdvancedDjango
|
6eee5477cd5a00423972c9cc3d2b5f1e4a501841
|
2b5d4c22b278c6d0e08ab7e84161163fe42e9a3f
|
refs/heads/master
| 2020-07-17T19:21:16.271964 | 2019-12-03T21:58:51 | 2019-12-03T21:58:51 | 206,081,522 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,228 |
py
|
from rest_framework.permissions import IsAuthenticated, BasePermission
from users.models import MainUser
class ProductPermission(BasePermission):
    message = 'You must be the authenticated.'

    def has_permission(self, request, view):
        if view.action == 'create':
            return request.user.is_superuser or request.user.is_store_admin
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        if not request.user.is_authenticated:
            return False
        if view.action != 'retrieve':
            return request.user.is_superuser or request.user.is_store_admin
        return True

class ServicePermission(BasePermission):
    message = 'You must be authenticated.'

    def has_permission(self, request, view):
        if view.action == 'create':
            return request.user.is_superuser or request.user.is_store_admin
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        if not request.user.is_authenticated:
            return False
        if view.action != 'retrieve':
            return request.user.is_superuser or request.user.is_store_admin
        return True
|
[
"[email protected]"
] | |
7bbe3d2066efd578b09eee3aee8ab3d57cc65694
|
c95310db610fd4ca1899903d3982d0733c2c693d
|
/problems/reverse_bits/solution.py
|
5fbfca10974c1a7d264cfe02ae82088da1d3273a
|
[] |
no_license
|
findcongwang/leetcode
|
e8cbc8899a6af2134de66d6ff1a1ead4c10d38e6
|
f52ea63fc0680613a3ebf3f3b4e4f1be7bbfd87c
|
refs/heads/main
| 2023-02-25T17:33:41.270128 | 2021-01-29T17:57:15 | 2021-01-29T17:57:15 | 306,998,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 221 |
py
|
class Solution:
    def reverseBits(self, n: int) -> int:
        count = 31
        num = 0
        while n > 0:
            num = num + ((n&1) * 2**count)
            n = n >> 1
            count -= 1
        return num
|
[
"[email protected]"
] | |
acc0a1be6531ff69b956ab61565fa4ebda0839ee
|
5cc94ad2377777067e7e9fad08823f201b790a63
|
/github-backup
|
94c4a4a40db48d1d39243f0149b93648e9e996ca
|
[] |
no_license
|
rrosajp/github-backup
|
86ea44a6addd24072cfe150de2be4c4a71310720
|
a538de0bd0a29b82f4cc669cf60132aa83548469
|
refs/heads/master
| 2022-03-17T23:15:25.856395 | 2019-11-27T19:02:35 | 2019-11-27T19:02:35 | 273,772,938 | 1 | 0 | null | 2020-06-20T19:37:44 | 2020-06-20T19:37:43 | null |
UTF-8
|
Python
| false | false | 4,511 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# github_backup.py
#
# Copyright 2017 Spencer McIntyre <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import argparse
import fnmatch
import getpass
import os
import shutil
import sys
import git
import github
import yaml
__version__ = '1.0'
def _get_github(arguments, config):
    if arguments.auth_token:
        gh = github.Github(arguments.auth_token)
    elif arguments.auth_user:
        password = getpass.getpass("{0}@github.com: ".format(arguments.auth_user))
        gh = github.Github(arguments.auth_user, password)
    else:
        gh = github.Github(config['token'])
    return gh

def _backup_repo(repo_url, destination):
    repo = None
    if os.path.isdir(destination):
        repo = git.Repo(destination)
        for remote in repo.remotes:
            remote.fetch()
    else:
        try:
            repo = git.Repo.clone_from(repo_url, destination)
        except git.exc.GitCommandError:
            if os.path.isdir(destination):
                shutil.rmtree(destination)
    return repo

def backup_repo(repo, destination):
    destination = os.path.join(destination, *repo.full_name.split('/'))
    _backup_repo(repo.ssh_url, destination)

def backup_wiki(repo, destination):
    destination = os.path.join(destination, *repo.full_name.split('/')) + '.wiki'
    _backup_repo(repo.ssh_url[:-4] + '.wiki.git', destination)

def repo_matches_rules(rules, repo_slug, default=True):
    for rule in rules:
        result = True
        if rule.startswith('!'):
            result = False
            rule = rule[1:]
        if fnmatch.fnmatch(repo_slug, rule):
            return result
    return default
def main():
    parser = argparse.ArgumentParser(description='GitHub Backup', conflict_handler='resolve')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s Version: ' + __version__)
    auth_type_parser_group = parser.add_mutually_exclusive_group()
    auth_type_parser_group.add_argument('--auth-token', dest='auth_token', help='authenticate to github with a token')
    auth_type_parser_group.add_argument('--auth-user', dest='auth_user', help='authenticate to github with credentials')
    parser.add_argument('--dry-run', action='store_true', default=False, help='do not backup any repositories')
    parser.add_argument('config', type=argparse.FileType('r'), help='the configuration file')
    arguments = parser.parse_args()
    config = yaml.safe_load(arguments.config)
    gh = _get_github(arguments, config)
    user = gh.get_user()
    repos = []
    for repo in user.get_repos():
        if not repo_matches_rules(config['rules'], repo.full_name):
            continue
        repos.append(repo)
    print("rules matched {0:,} repositories".format(len(repos)))
    destination = config['destination']
    if not os.path.isdir(destination):
        os.makedirs(destination)
    width = len(str(len(repos)))
    for idx, repo in enumerate(repos, 1):
        print("[{0: >{width}}/{1: >{width}}] processing: {2}".format(idx, len(repos), repo.full_name, width=width))
        if arguments.dry_run:
            continue
        backup_repo(repo, destination)
        if 'wikis' in config['include'] and repo.has_wiki:
            backup_wiki(repo, destination)
    return 0

if __name__ == '__main__':
    sys.exit(main())
|
[
"[email protected]"
] | ||
3fb43fde0c3c30ff359ad45502555447f39d8cdb
|
6543e5bfec2f2d89ac6a7e42e8f38d2f3df26615
|
/1306JumpGameIII.py
|
5545d7464d227a8cbade8389f59323cc7f68b298
|
[] |
no_license
|
adityachhajer/LeetCodeSolutions
|
bc3136fa638a3b2c7d35e419a063e9ce257bc1af
|
693fd3aef0f823f9c3cfc14c3d1010584b7a762d
|
refs/heads/master
| 2023-02-17T08:21:00.064932 | 2021-01-19T14:05:08 | 2021-01-19T14:05:08 | 270,973,258 | 2 | 1 | null | 2020-09-30T18:30:21 | 2020-06-09T10:33:39 |
Python
|
UTF-8
|
Python
| false | false | 532 |
py
|
from typing import List

class Solution:
    def solve(self, arr, start, ans):
        if start >= len(arr) or start < 0:
            return False
        elif arr[start] == 0:
            # ans[start] = True
            return True
        elif ans[start] == 1:
            return False
        else:
            ans[start] = 1
            return self.solve(arr, start+arr[start], ans) or self.solve(arr, start-arr[start], ans)

    def canReach(self, arr: List[int], start: int) -> bool:
        ans = [0] * len(arr)
        return self.solve(arr, start, ans)
|
[
"[email protected]"
] | |
fe1a0e8c9ddaa1a7fa94404a66e4dc8769440385
|
e2a006a139330bca613169e547e185b1a5048646
|
/L8/busca_punto.py
|
a31fad5cb4ef87ebcb323606529b9ccf720bee06
|
[] |
no_license
|
Mi7ai/EI1022
|
013d54d470b38c125503c0173b355cc7bc681784
|
f80d5b4f99e1427fd5c2673499a3eee0d1dfc1ca
|
refs/heads/master
| 2020-03-29T13:54:58.231532 | 2018-12-05T13:50:27 | 2018-12-05T13:50:27 | 149,988,593 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 976 |
py
|
def busca_punto_rec(v):
    def rec(b: int, e: int):
        if e-b == 0: return None
        h = (b+e)//2
        if v[h] < h:
            return rec(h, e)
        elif v[h] > h:
            return rec(b, h)
        else:
            return h
    return rec(0, len(v))

# homework: do it without recursion
# def busca_punto_fijo(v):
#     def rec(b: int, e: int):
#         # while stop condition: if e-b == 0: return None
#
#         # inside the while
#         h = (b+e)//2
#         if v[h] < h:
#             return rec(h, b)
#         elif v[h] > h:
#             return rec(b, h)
#         else:
#             return h
#         # up to here
#     return rec(0, len(v))

def busca_punto_pico(v):
    def rec(b: int, e: int):
        if e-b == 1: return b
        h = (b+e)//2  # middle
        if v[h] <= h:
            return rec(h, e)
        return rec(b, h)
    return rec(0, len(v))

if __name__ == "__main__":
    v = [-10, -5, 1, 15, 3, 6]
    print(busca_punto_pico(v))
|
[
"[email protected]"
] | |
7e8185b954c7aad21024d466b6adf503e4442918
|
4664bb1572d1d9bb90f99a11016d0fbe9b28c632
|
/search/utils/constants.py
|
647ceca9fa4eb44b99e06938b96dd418393a4d69
|
[] |
no_license
|
prdx/Indexer
|
32cfbf05dfec595c00550322859260c15a2906e8
|
08b52734b293cb2f0a57ed74d818ece70425711f
|
refs/heads/master
| 2020-03-22T23:22:27.947273 | 2018-07-20T16:27:40 | 2018-07-20T16:27:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 494 |
py
|
class Constants:
    CACHE_PATH = './cache/'
    DATA_PATH = './AP_DATA/ap89_collection/'
    DOC_TYPE = 'document'
    DOCLIST_PATH = './AP_DATA/processed_doclist.txt'
    ES_SCRIPTS_PATH = './es/'
    INDEX_NAME = 'ap89_collection'
    MAX_OUTPUT = 1000
    RESULTS_PATH = './results/'
    STOPWORDS_PATH = './AP_DATA/stoplist.txt'
    USE_STEMMING = False
    QUERY_LIST_PATH = './AP_DATA/query_desc.51-100.short.txt'
    QUERY_LIST_PATH_FOR_PS = './AP_DATA/query_desc.51-100.short.ps.txt'
|
[
"[email protected]"
] | |
d3b38d9183e453184a9cbc3ba6574a778fc49705
|
74649c1220c68ad0af79e420d572e3769fcd7a53
|
/_unittests/ut_documentation/test_run_notebooks_onnx_viz.py
|
6a0613ce77a5e839c1437dc40f2a84f34aab0076
|
[
"MIT"
] |
permissive
|
sdpython/mlprodict
|
e62edcb428700cb2c4527e54e96431c1d2b36118
|
27d6da4ecdd76e18292f265fde61d19b66937a5c
|
refs/heads/master
| 2023-05-08T10:44:30.418658 | 2023-03-08T22:48:56 | 2023-03-08T22:48:56 | 112,469,804 | 60 | 13 |
MIT
| 2023-04-19T01:21:38 | 2017-11-29T11:57:10 |
Python
|
UTF-8
|
Python
| false | false | 991 |
py
|
# -*- coding: utf-8 -*-
"""
@brief test log(time=30s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.ipythonhelper import test_notebook_execution_coverage
from pyquickhelper.pycode import (
add_missing_development_version, ExtTestCase
)
import mlprodict
class TestNotebookOnnxViz(ExtTestCase):

    def setUp(self):
        add_missing_development_version(["jyquickhelper"], __file__, hide=True)

    def test_notebook_onnx_vis(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        self.assertNotEmpty(mlprodict is not None)
        folder = os.path.join(os.path.dirname(__file__),
                              "..", "..", "_doc", "notebooks")
        test_notebook_execution_coverage(__file__, "onnx_visualization", folder,
                                         this_module_name="mlprodict", fLOG=fLOG)

if __name__ == "__main__":
    unittest.main()
|
[
"[email protected]"
] | |
82a1fc92fe198a109b3287ee4730928c9d081839
|
1978a9455159b7c2f3286e0ad602652bc5277ffa
|
/exercises/04_data_structures/task_4_8.py
|
e089dd1a02e42d41758421e563cf86a42a9f841c
|
[] |
no_license
|
fortredux/py_net_eng
|
338fd7a80debbeda55b5915dbfba4f5577279ef0
|
61cf0b2a355d519c58bc9f2b59d7e5d224922890
|
refs/heads/master
| 2020-12-03T17:32:53.598813 | 2020-04-08T20:55:45 | 2020-04-08T20:55:45 | 231,409,656 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,337 |
py
|
# -*- coding: utf-8 -*-
'''
Task 4.8

Convert an IP address to binary format and print it to standard output in columns, so that:
- the first line contains the decimal values of the bytes
- the second line contains the binary values

The output must be arranged as in the example:
- in columns
- with a column width of 10 characters

Example output for the address 10.1.1.1:
10        1         1         1
00001010  00000001  00000001  00000001

Restriction: all tasks must be solved using only the topics covered so far.
'''
ip = '192.168.3.1'
ip_template = '''
{0:<10} {1:<10} {2:<10} {3:<10}
{0:010b} {1:010b} {2:010b} {3:010b}
'''
ip_split = ip.split('.')
a = int(ip_split[0])  # Must be converted to int,
b = int(ip_split[1])  # otherwise there will be an error,
c = int(ip_split[2])  # because the template that converts to binary
d = int(ip_split[3])  # needs numbers, not strings
print(ip_template.format(a, b, c, d))
|
[
"[email protected]"
] | |
920f4452d90e699d289148b8978d08ac6d9ab071
|
ca9ab449331c31c1046ddbdd09a7079a65024a8a
|
/data_structure/dict/dictionary-basic.py
|
f66a2c66a35de603ed5c996bb9c524f11ebfc656
|
[] |
no_license
|
linth/learn-python
|
95947fd40f578d4473496c263ab45a7459fe4773
|
33c36b45c0dc46cf6d9a5d1237b63c8c2442b3fd
|
refs/heads/master
| 2023-04-28T17:08:06.473326 | 2023-04-24T17:09:41 | 2023-04-24T17:09:41 | 172,464,385 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,583 |
py
|
# dictionary: key and value.
dict = {
"name": "George",
"year": 2019,
"sex": "boy",
"weight": 78,
"height": 190,
}
print(dict)
# get value
print(dict['year']) # first way to get value.
x = dict.get('year') # another way to get value.
print(x) # 2019.
# change value
dict['year'] = 2000
print(dict['year']) # 2000.
# use for loop to show data
for i in dict.keys():
    # show keys.
    print('keys = ', i)  # name, year, sex, weight, height.

for x in dict.values():
    # show value.
    print('values = ', x)  # George, 2000, boy, 78, 190.

for x, y in dict.items():
    print(x, y)  # name George, year 2000, sex boy, weight 78, height 190.
# copy dictionary
x = dict.copy()
print(x) # {'name': 'George', 'year': 2000, 'sex': 'boy', 'weight': 78, 'height': 190}
# add items.
dict = {
"name": "George",
"year": 2019,
"sex": "boy",
"weight": 78,
"height": 190,
}
dict['name'] = 'Peter'
print(dict) # name Peter, year 2019, sex boy, weight 78, height 190.
# remove item
dict.pop('name')
print(dict) # year 2019, sex boy, weight 78, height 190.
z = dict.clear() # remove all data.
print(z) # return None.
# remove the last inserted item
dict = {"year": 2019, "sex": "boy", "weight": 78, "height": 190}
dict.popitem()
print(dict) # year 2019, sex boy, weight 78
# remove the specified key name.
del dict['year']
print(dict) # sex boy, weight 78
# update, merge two dict
# it's for python 2.x and it's not support to python 3 up.
# dict1 = {"name": "George", "id": 1234}
# dict2 = {"name": "Peter", "id": 3456}
# print(dict1.update(dict2))
|
[
"[email protected]"
] | |
d348b2ed798851b3ca34bbfdcb624c6df23eb785
|
b107883be08ea56bd3a56ddb0e2dd8dacce7db2e
|
/src/polystar/pipeline/keras/trainer.py
|
db62a563eeff68041d0a316b49186f70f8caffa3
|
[] |
no_license
|
PolySTAR-mtl/cv
|
ef7977b62577e520f6c69a9b7891c7f38e307028
|
27564abe89e7dff612e3630c31e080fae4164751
|
refs/heads/master
| 2023-05-01T16:45:19.777459 | 2021-05-30T10:36:10 | 2021-05-30T10:36:10 | 356,053,312 | 0 | 0 | null | 2021-05-30T10:36:11 | 2021-04-08T21:32:06 |
Python
|
UTF-8
|
Python
| false | false | 1,427 |
py
|
from dataclasses import dataclass, field
from typing import List
from numpy.core._multiarray_umath import ndarray
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.keras.models import Model
from polystar.pipeline.keras.compilation_parameters import KerasCompilationParameters
from polystar.pipeline.keras.data_preparator import KerasDataPreparator
from polystar.pipeline.keras.model_preparator import KerasModelPreparator
@dataclass
class KerasTrainer:
    compilation_parameters: KerasCompilationParameters
    callbacks: List[Callback]
    data_preparator: KerasDataPreparator
    model_preparator: KerasModelPreparator = field(default_factory=KerasModelPreparator)
    max_epochs: int = 300
    verbose: int = 0

    def train(
        self,
        model: Model,
        train_images: ndarray,
        train_labels: ndarray,
        validation_images: ndarray,
        validation_labels: ndarray,
    ):
        model = self.model_preparator.prepare_model(model)
        model.compile(**self.compilation_parameters.__dict__)
        train_data, steps = self.data_preparator.prepare_training_data(train_images, train_labels)
        model.fit(
            x=train_data,
            validation_data=(validation_images, validation_labels),
            steps_per_epoch=steps,
            epochs=self.max_epochs,
            callbacks=self.callbacks,
            verbose=self.verbose,
        )
|
[
"[email protected]"
] | |
02ae2bc23f3e2eb1ae2eaa25d2b06540fd152327
|
109a8fb3e17ccf1efa6057d59441933ce678c2f0
|
/app/schemas/user_schema.py
|
39d4e7768ecc51aca1201755f784f6fcb9b635f7
|
[] |
no_license
|
oulabla/rest_test
|
8daac7db642adb72a07a9662920bcbfd7b750fbf
|
fad8eb7bd253271706518b3ba6f117997ba08dfa
|
refs/heads/main
| 2023-08-27T07:39:15.079957 | 2021-10-15T15:56:02 | 2021-10-15T15:56:02 | 416,887,760 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 801 |
py
|
from marshmallow import fields
from . import BaseSchema
from marshmallow_sqlalchemy import auto_field
from marshmallow.fields import Date
from marshmallow_sqlalchemy.fields import Nested
from app.models.user import User
from app.schemas.role_schema import RoleSchema
from app.schemas.phone_schema import PhoneSchema
class UserSchema(BaseSchema):
    class Meta:
        model = User
        load_instance = True
        dateformat = '%Y-%m-%dT%H:%M:%S'

    id = auto_field()
    name = auto_field()
    additional_info = auto_field()
    created_at = Date()
    updated_at = Date()
    roles = Nested(RoleSchema, many=True)
    phones = Nested(PhoneSchema, many=True)

    # def on_bind_field(self, field_name, field_obj):
    #     super().on_bind_field(field_name, field_obj)
|
[
"="
] |
=
|
4db4ead4d9e8fc01e987b14c56aca330364ea2b7
|
6920575dc95c6800fb8e8088719afa88931281b5
|
/tests/validator/test_validating_a_motion.py
|
8f6a6c4882be98bfdbdde320c887b6a363fa5e59
|
[] |
no_license
|
pipermerriam/ethereum-alarm-governance
|
091de3a3f72c95e097a24bd05a1d7ebe4738a01b
|
1b4a9a0355976f92e88396582a64fdfb50bbe858
|
refs/heads/master
| 2023-08-28T09:39:44.191755 | 2016-02-15T22:18:18 | 2016-02-15T22:18:18 | 47,724,489 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,704 |
py
|
import pytest
deploy_contracts = []
ONE_DAY = 24 * 60 * 60
@pytest.mark.parametrize(
    "quorium,pass_percentage,debate_period,executor,expected",
    (
        (10000, 51, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", True),
        (10001, 51, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", True),
        (10000, 100, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", True),
        (10000, 100, ONE_DAY + 1, "0xd3cda913deb6f67967b99d67acdfa1712c293601", True),
        (9999, 100, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", False),
        (10000, 50, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", False),
        (10000, 51, ONE_DAY - 1, "0xd3cda913deb6f67967b99d67acdfa1712c293601", False),
        (10000, 51, ONE_DAY, "0x0000000000000000000000000000000000000000", False),
    )
)
def test_validation_with_acceptable_values(deploy_contract, contracts,
                                           deploy_client, accounts,
                                           get_log_data, deploy_coinbase,
                                           Status, quorium, debate_period,
                                           pass_percentage, executor,
                                           expected):
    validator = deploy_contract(contracts.Validator)
    validator.setMinimumQuorum.s(10000)
    validator.setMinimumPassPercentage.s(51)
    validator.setMinimumDebatePeriod.s(ONE_DAY)
    motion = deploy_contract(
        contracts.Motion,
        constructor_args=(deploy_coinbase,),
    )
    motion.configure.s(quorium, debate_period, pass_percentage, executor)
    actual = validator.validate(motion._meta.address)
    assert actual is expected
|
[
"[email protected]"
] | |
ce88bf7af10a89c8474dffc2a82e54e04c1d6a2b
|
56ffce29f0d27f83206e11870d95982c38524aae
|
/apweb/site/configure.py
|
e40a59ea7cfb45c55048ca631afb93938572dc5f
|
[] |
no_license
|
adamandpaul/apweb
|
cce365085e2ee58cfbc31544c5a7414e67ad56b4
|
b1bb81fa7d7b39f19e187462aa3447ff482b46af
|
refs/heads/master
| 2022-10-19T02:09:52.437906 | 2021-05-21T06:10:08 | 2021-05-21T06:10:08 | 201,398,036 | 0 | 3 | null | 2022-09-21T21:39:41 | 2019-08-09T05:41:06 |
Python
|
UTF-8
|
Python
| false | false | 1,316 |
py
|
# -*- coding:utf-8 -*-
from .password_login_provider import PasswordLoginProvider
from .resource import Site
import apweb.authentication
def site_factory(request):
    """Return a default site factory"""
    return Site.from_request(request)

def get_user_for_unauthenticated_userid(request):
    email = request.unauthenticated_userid
    return request.site["users"].get_user_by_email(email)

def get_identifiers(request):
    identifiers = [*apweb.authentication.get_identifiers(request)]
    user = request.user
    if user:
        identifiers.append(("user_uuid", str(user.user_uuid)))
    return identifiers

def get_roles(request):
    roles = [*apweb.authentication.get_roles(request)]
    user = request.user
    if user:
        roles.extend(user.assigned_roles)
        roles.append('authenticated')
    return roles

def includeme(config):
    """A site configuration"""
    config.include("apweb")
    config.add_request_method(site_factory, "site", reify=True)
    config.add_request_method(get_user_for_unauthenticated_userid, "user", reify=True)
    config.add_request_method(get_identifiers, "identifiers", reify=True)
    config.add_request_method(get_roles, "roles", reify=True)
    config.register_login_provider(PasswordLoginProvider())
    config.include(".view")
    config.commit()
|
[
"[email protected]"
] | |
c948ad237c590dfd89ae5e491450ac4895fbb550
|
fa82dad9e83206d4630a55141bf44f50cbf0c3a8
|
/day1_python/01_python200_src/181.py
|
20a43d86dda37f41451faaad66761d0cd6d16b34
|
[] |
no_license
|
jsh2333/pyml
|
8f8c53a43af23b8490b25f35f28d85f1087df28d
|
157dfa7cc2f1458f12e451691a994ac6ef138cab
|
refs/heads/master
| 2021-03-27T22:26:38.254206 | 2020-04-26T06:35:11 | 2020-04-26T06:35:11 | 249,114,580 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 601 |
py
|
def countBirths():
    ret = []
    for y in range(1880, 2015):
        count = 0
        filename = 'names/yob%d.txt' % y
        with open(filename, 'r') as f:
            data = f.readlines()
        for d in data:
            if d[-1] == '\n':
                d = d[:-1]
            birth = d.split(',')[2]
            count += int(birth)
        ret.append((y, count))
    return ret

result = countBirths()
with open('birth_by_year.csv', 'w') as f:
    for year, birth in result:
        data = '%s,%s\n' % (year, birth)
        print(data)
        f.write(data)
|
[
"[email protected]"
] | |
81e1ff36d764ebf2d6e179a45efa2e2ac9a119f4
|
71a7ed5dd56b89aea1795a3db1469e3006be783f
|
/configs/body/2d_kpt_sview_rgb_img/deeppose/coco/res50_coco_256x192.py
|
2d2b20fa49d0e23356d688081388809183364474
|
[
"Apache-2.0"
] |
permissive
|
RunningLeon/mmpose
|
b6091416a2565778a1438b23edaa6620a2088a3e
|
9ef202c0a4b2c28add32b530caf2ea0354843fe4
|
refs/heads/master
| 2023-06-30T04:46:52.917877 | 2021-08-05T08:54:24 | 2021-08-05T08:54:24 | 335,275,684 | 0 | 0 |
Apache-2.0
| 2021-02-02T12:06:57 | 2021-02-02T12:06:57 | null |
UTF-8
|
Python
| false | false | 3,956 |
py
|
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', save_best='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='torchvision://resnet50',
backbone=dict(type='ResNet', depth=50, num_stages=4, out_indices=(3, )),
neck=dict(type='GlobalAveragePooling'),
keypoint_head=dict(
type='DeepposeRegressionHead',
in_channels=2048,
num_joints=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='SmoothL1Loss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(flip_test=True))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTargetRegression'),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
|
[
"[email protected]"
] | |
86fcf6c74099436224d26ca4ccd9c27e498fcc0c
|
11f7add72635ad985b3e98fd77e9426e8c74ab08
|
/growthintel/growthintel.py
|
7d641b9bad3fbb521fa21f28b0a5e9ec17603326
|
[] |
no_license
|
harshdarji/python
|
afa6b11338504567ece8bb1e78e841d13716ff14
|
8bad854304f423264b7b0724b87c7cd7de748cd6
|
refs/heads/master
| 2020-12-31T01:48:04.439466 | 2012-09-13T09:22:58 | 2012-09-13T09:22:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,219 |
py
|
APIKey="00a5f2164e5e49b29981ddcbe9df7d7c"
import parse
import json
output={"type":"FeatureCollection","features":[]}
postcodes=["AB","AL","B","BA","BB","BD","BH","BL","BN","BR","BS","BT","CA","CB","CF","CH","CM","CO","CR","CT","CV","CW","DA","DD","DE","DG","DH","DL","DN","DT","DY","E","EC","EH","EN","EX","FK","FY","G","GL","GU","HA","HD","HG","HP","HR","HS","HU","HX","IG","IM","IP","IV","KA","KT","KW","KY","L","LA","LD","LE","LL","LN","LS","LU","M","ME","MK","ML","N","NE","NG","NN","NP","NR","NW","OL","OX","PA","PE","PH","PL","PO","PR","RG","RH","RM","S","SA","SE","SG","SK","SL","SM","SN","SO","SP","SR","SS","ST","SW","SY","TA","TD","TF","TN","TQ","TR","TS","TW","UB","W","WA","WC","WD","WF","WN","WR","WS","WV","YO","ZE","BR","CR","DA","E","EC","EN","HA","IG","KT","N","NW","RM","SE","SM","SW","TW","UB","W","WC","WD"]
stuburl="http://geocoding.cloudmade.com/"+APIKey+"/geocoding/v2/find.js?query=postcode:"
urlend="&return_geometry=true"
for postcode in postcodes:
    url = stuburl + postcode + urlend
    data = json.loads(htmlify(url))
    print postcode, len(url)
    if ("features" in data):
        output["features"].append(data["features"][0])
    else:
        print "no data found for " + postcode
|
[
"[email protected]"
] | |
32d654a348320c21302268c73278d97e9540c221
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GLES2/NV/framebuffer_blit.py
|
ada66373b0f3e2262f88cc60a536d0ce0840c18b
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,008 |
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# End users want this...
from OpenGL.raw.GLES2 import _errors
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
_EXTENSION_NAME = 'GLES2_NV_framebuffer_blit'
def _f(function):
    return _p.createFunction(function, _p.PLATFORM.GLES2, 'GLES2_NV_framebuffer_blit',
                             error_checker=_errors._error_checker)
GL_DRAW_FRAMEBUFFER_BINDING_NV = _C('GL_DRAW_FRAMEBUFFER_BINDING_NV', 0x8CA6)
GL_DRAW_FRAMEBUFFER_NV = _C('GL_DRAW_FRAMEBUFFER_NV', 0x8CA9)
GL_READ_FRAMEBUFFER_BINDING_NV = _C('GL_READ_FRAMEBUFFER_BINDING_NV', 0x8CAA)
GL_READ_FRAMEBUFFER_NV = _C('GL_READ_FRAMEBUFFER_NV', 0x8CA8)
@_f
@_p.types(None, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLbitfield,
_cs.GLenum)
def glBlitFramebufferNV(srcX0,srcY0,srcX1,srcY1,dstX0,dstY0,dstX1,dstY1,mask,filter):pass
|
[
"[email protected]"
] | |
c520dd7bddfc90caab08af4bb0a8517af01ac98a
|
0422bca79c61ee4630f7c762e77ca9780b05e3ff
|
/pattern/text/nl/parser/__init__.py
|
590127d45bbbde61c6675e815462d4cc4e8842d0
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
kkoch986/pattern
|
69ac11d12ea4fd2bb581e3343bca021cf6e46c92
|
db807c888dcd15f515afe31753c9b0345a11b542
|
refs/heads/master
| 2021-01-18T04:26:13.078631 | 2013-02-14T03:29:25 | 2013-02-14T03:29:25 | 8,186,866 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,508 |
py
|
#### PATTERN | NL | RULE-BASED SHALLOW PARSER ######################################################
# Copyright (c) 2010 Jeroen Geertzen and University of Antwerp, Belgium
# Authors: Jeroen Geertzen (Dutch language model), Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import re
import os
try:
    MODULE = os.path.dirname(__file__)
except:
    MODULE = ""
# The tokenizer, chunker and relation finder are inherited from pattern.en.parser.
# The tagger is based on Jeroen Geertzen's Dutch language model Brill-NL
# (brill-bigrams.txt, brill-contextual.txt, brill-lexical.txt, brill-lexicon.txt):
# http://cosmion.net/jeroen/software/brill_pos/
# Accuracy is reported around 92%, but Pattern scores may vary from Geertzen's original
# due to WOTAN => Penn Treebank mapping etc.
try:
    from ...en.parser import Lexicon
    from ...en.parser import PUNCTUATION, tokenize as _en_tokenize, parse as _en_parse, TaggedString
    from ...en.parser import commandline
except:
    import sys; sys.path.insert(0, os.path.join(MODULE, "..", ".."))
    from en.parser import Lexicon
    from en.parser import PUNCTUATION, tokenize as _en_tokenize, parse as _en_parse, TaggedString
    from en.parser import commandline
#### TOKENIZER #####################################################################################
abbreviations = [
    "a.d.h.v.", "afb.", "a.u.b.", "bv.", "b.v.", "bijv.", "blz.", "ca.", "cfr.", "dhr.", "dr.",
    "d.m.v.", "d.w.z.", "e.a.", "e.d.", "e.g.", "enz.", "etc.", "e.v.", "evt.", "fig.", "i.e.",
    "i.h.b.", "ir.", "i.p.v.", "i.s.m.", "m.a.w.", "max.", "m.b.t.", "m.b.v.", "mevr.", "min.",
    "n.a.v.", "nl.", "n.o.t.k.", "n.t.b.", "n.v.t.", "o.a.", "ong.", "pag.", "ref.", "t.a.v.",
    "tel.", "zgn."]

def tokenize(s, punctuation=PUNCTUATION, abbreviations=abbreviations, replace={"'n": " 'n"}):
    # 's in Dutch preceded by a vowel indicates plural ("auto's"): don't replace.
    s = _en_tokenize(s, punctuation, abbreviations, replace)
    s = [re.sub(r"' s (ochtends|morgens|middags|avonds)", "'s \\1", s) for s in s]
    return s
_tokenize = tokenize
#### LEMMATIZER ####################################################################################
# Word lemmas using singularization and verb conjugation from the inflect module.
try:
    from ..inflect import singularize, conjugate, predicative
except:
    try:
        sys.path.append(os.path.join(MODULE, ".."))
        from inflect import singularize, conjugate, predicative
    except:
        try:
            from pattern.nl.inflect import singularize, conjugate, predicative
        except:
            singularize = lambda w: w
            conjugate = lambda w, t: w
            predicative = lambda w: w
def lemma(word, pos="NN"):
    if pos == "NNS":
        return singularize(word)
    if pos.startswith(("VB","MD")):
        return conjugate(word, "infinitive") or word
    if pos.startswith("JJ") and word.endswith("e"):
        return predicative(word)
    return word

def find_lemmata(tagged):
    for token in tagged:
        token.append(lemma(token[0].lower(), pos=len(token) > 1 and token[1] or None))
    return tagged
#### PARSER ########################################################################################
# pattern.en.find_tags() has an optional "lexicon" parameter.
# We'll pass the Dutch lexicon to it instead of the default English lexicon:
lexicon = LEXICON = Lexicon()
lexicon.path = os.path.join(MODULE, "brill-lexicon.txt")
lexicon.lexical_rules.path = os.path.join(MODULE, "brill-lexical.txt")
lexicon.contextual_rules.path = os.path.join(MODULE, "brill-contextual.txt")
lexicon.named_entities.tag = "N(eigen,ev)"
# WOTAN tagset:
# http://lands.let.ru.nl/literature/hvh.1999.2.ps
PENN = PENNTREEBANK = TREEBANK = "penntreebank"
WOTAN = "wotan"
wotan = {
"N(": [("eigen,ev","NNP"), ("eigen,mv","NNPS"), ("ev","NN"), ("mv","NNS")],
"V(": [("hulp","MD"), ("ott,3","VBZ"), ("ott","VBP"), ("ovt","VBD"), ("verldw","VBN"), ("tegdw","VBG"), ("imp","VB"), ("inf","VB")],
"Adj(": [("stell","JJ"), ("vergr","JJR"), ("overtr","JJS")],
"Adv(": [("deel","RP"), ("gew","RB"), ("pro","RB")],
"Art(": "DT",
"Conj(": "CC",
"Num(": "CD",
"Prep(": [("voorinf","TO"), ("", "IN")],
"Pron(": [("bez","PRP$"), ("","PRP")],
"Punc(": [("komma",","), ("haakopen","("), ("haaksluit",")"), ("",".")],
"Int": "UH",
"Misc": [("symbool","SYM"), ("vreemd","FW")]
}
def wotan2penntreebank(tag):
    """ Converts a WOTAN tag to Penn Treebank II tag.
        For example: bokkenrijders N(soort,mv,neut) => bokkenrijders/NNS
    """
    for k, v in wotan.iteritems():
        if tag.startswith(k):
            if not isinstance(v, list):
                return v
            for a, b in v:
                if a in tag.replace("_",""): return b
            return tag
    return tag
def parse(s, tokenize=True, tags=True, chunks=True, relations=False, lemmata=False, encoding="utf-8", **kwargs):
""" Takes a string (sentences) and returns a tagged Unicode string.
Sentences in the output are separated by newlines.
"""
if tokenize:
s = _tokenize(s)
# Reuse the English parser:
kwargs.update({
"lemmata": False,
"light": False,
"lexicon": LEXICON,
"language": "nl",
"default": "N(soort,ev,neut)",
"map": kwargs.get("tagset", "") != WOTAN and wotan2penntreebank or None,
})
s = _en_parse(s, False, tags, chunks, relations, **kwargs)
# Use pattern.nl.inflect for lemmatization:
if lemmata:
p = [find_lemmata(sentence) for sentence in s.split()]
s = TaggedString(p, tags=s.tags+["lemma"], language="nl")
return s
def tag(s, tokenize=True, encoding="utf-8"):
""" Returns a list of (token, tag)-tuples from the given string.
"""
tags = []
for sentence in parse(s, tokenize, True, False, False, False, encoding).split():
for token in sentence:
tags.append((token[0], token[1]))
return tags
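# Illustrative API usage (hypothetical sentence): parse() returns a tagged
# string with one sentence per line, tag() a list of (token, tag)-tuples.
#   print(parse(u"De kat wil vis eten."))
#   print(tag(u"De kat wil vis eten."))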
#### COMMAND LINE ##################################################################################
# From the folder that contains the "pattern" folder:
# python -m pattern.nl.parser xml -s "De kat wil wel vis eten maar geen poot nat maken." -OTCLI
if __name__ == "__main__":
commandline(parse)
|
[
"[email protected]"
] | |
cfd7b02747987b034cf4487a4f0d184a14a698da
|
43a4fd934407a963c876dc1b15fd7052e693592b
|
/GearBot/Util/RaidHandling/RaidShield.py
|
2920e7cee1e644e445fd3d504d578a43fb9c6170
|
[
"MIT"
] |
permissive
|
mrkirby153/GearBot
|
aa02fdd32bec292a54e8e3805bd829f16522cdc8
|
a61d936c12e906c7bcfaa840a585ee25ffc06138
|
refs/heads/master
| 2020-05-29T18:59:55.450101 | 2019-06-02T09:54:18 | 2019-06-02T09:54:18 | 189,318,104 | 0 | 0 |
MIT
| 2019-05-30T00:31:06 | 2019-05-30T00:31:06 | null |
UTF-8
|
Python
| false | false | 1,444 |
py
|
from Util import GearbotLogging
from Util.RaidHandling import RaidActions
class RaidShield:
def __init__(self, shield_info) -> None:
self.shield_name=shield_info["name"]
self.start_actions = [action for action in shield_info["actions"]["triggered"]]
self.raider_actions = [action for action in shield_info["actions"]["raider"]]
self.termination_actions = [action for action in shield_info["actions"]["terminated"]]
async def raid_detected(self, bot, guild, raid_id, raider_ids, shield):
GearbotLogging.log_to(guild.id, "raid_shield_triggered", raid_id=raid_id, name=self.shield_name)
await self.handle_actions(self.start_actions, bot, guild, raid_id, raider_ids, shield)
async def handle_raider(self, bot, raider, raid_id, raider_ids, shield):
await self.handle_actions(self.raider_actions, bot, raider, raid_id, raider_ids, shield)
async def shield_terminated(self, bot, guild, raid_id, raider_ids, shield):
GearbotLogging.log_to(guild.id, "raid_shield_terminated", raid_id=raid_id, name=self.shield_name)
await self.handle_actions(self.termination_actions, bot, guild, raid_id, raider_ids, shield)
async def handle_actions(self, actions, bot, o, raid_id, raider_ids, shield):
for a in actions:
action = RaidActions.handlers[a["type"]]
await action.execute(bot, o, a["action_data"], raid_id, raider_ids, shield)
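# Illustrative shape of shield_info, inferred from the attribute accesses
# above (real GearBot shield configs may name things differently):
# {
#     "name": "example_shield",
#     "actions": {
#         "triggered":  [{"type": "some_action", "action_data": {}}],
#         "raider":     [{"type": "some_action", "action_data": {}}],
#         "terminated": []
#     }
# }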
|
[
"[email protected]"
] | |
c35ac0e38e1bf86dd30321937444199e6b545717
|
478009111504d54f45490f71c1e61190ecfede3f
|
/sesion_4/test_xl2.py
|
54fc999f79dc94a336ef6161315e7708b6b13766
|
[] |
no_license
|
BeatrizInGitHub/python-sci
|
9938c406e9642385d602971e985668765dfabbfa
|
d8ed9656184b5f08d41430ed5a770dc6f4550893
|
refs/heads/master
| 2020-05-22T19:46:08.065601 | 2017-02-26T02:15:31 | 2017-02-26T02:15:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 300 |
py
|
from openpyxl import load_workbook
wb = load_workbook("Libro1.xlsx")
ws = wb["Hoja1"]
cells = ws["C4:F10"]
mat = []
for r in cells:
row = []
for cell in r:
x = cell.value
row.append(x)
mat.append(row)
print mat
x = ws["C4"].value
x = mat[0][0]
|
[
"[email protected]"
] | |
51cf140ce032eb19a30ee3990d6bf89971df2ab8
|
c1fcdd80101aeae0ba8b4ae0e229f58ed58fd41f
|
/testPredictor.py
|
fe7bea830d5e4f619513d78c2e0e5b9ab4ea4023
|
[] |
no_license
|
luizgh/ModelEvaluation
|
367432f0456052f539c61766b00f1f59bc5fa3a5
|
2933918276d0dcc42d484d9163c4b6c9ea32f26a
|
refs/heads/master
| 2016-09-06T04:15:11.140896 | 2014-03-19T00:07:46 | 2014-03-19T00:07:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 556 |
py
|
import unittest
from Predictor import Predictor
from TestUtils import *
class testPredictor(unittest.TestCase):
def testAll(self):
logProbabilities = numpy.asarray([[0.4, 0.80, 0.50],
[0.45, 0.4, 0.41],
[0.4, 0.41, 0.45]])
expected = [1,0,2]
target = Predictor()
self.assertEquals(expected, target.getPredictions(logProbabilities))
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
a0b726ca16865f5077e8d0a563538ac2f6752e45
|
874153914c2784eb164c271e4a68c01265e5ce7f
|
/tool/module/arkon_config.py
|
a6a18adaf5c5f245e552d73e4e0c95d158b9cff6
|
[
"Apache-2.0"
] |
permissive
|
random-archer/mkinitcpio-systemd-tool
|
3c6f572244fb84e9eab4fa1291d4e5526a614751
|
5685070783975c934c517d603bcfd614f8b194a3
|
refs/heads/master
| 2023-08-17T20:52:13.040297 | 2022-12-21T14:20:10 | 2022-12-21T14:20:10 | 59,916,137 | 108 | 29 |
NOASSERTION
| 2023-08-13T09:35:00 | 2016-05-28T22:00:58 |
Python
|
UTF-8
|
Python
| false | false | 2,750 |
py
|
#!/usr/bin/env python
#
# shared config for build/setup/verify cycle
#
import os
import datetime
import nspawn
# detect build system
def has_ci_azure():
return "AZURE_EXTENSION_DIR" in os.environ
# no http proxy in azure
if has_ci_azure():
nspawn.CONFIG['proxy']['use_host_proxy'] = 'no'
nspawn.CONFIG['proxy']['use_machine_proxy'] = 'no'
# report state of non-buffered stdout/stderr
print(f"### PYTHONUNBUFFERED={os.environ.get('PYTHONUNBUFFERED', None)}")
# location of machine resources
nspawn_store = nspawn.CONFIG['storage']["root"]
# location of this module
this_dir = os.path.dirname(os.path.abspath(__file__))
# arch linux archive iso image date
build_epoch = datetime.datetime(year=2020, month=3, day=1)
# kernel version used by arch linux archive iso
kernel_version = f"5.5.6-arch1-1"
# current user account
host_user = os.getenv('USER', "root")
# azure or local resource location
user_home = os.getenv('HOME', "/root")
# location of source repository
project_repo = os.popen("git rev-parse --show-toplevel").read().strip()
# location of disk mount shared host/machine
# contains output of mkinitcpio: vmlinuz, linux-initramfs.img
project_boot = f"{project_repo}/boot"
# location of disk mount shared host/machine
# contains extracted content of linux-initramfs.img
project_data = f"{project_repo}/data"
# location of sysroot produced by this tool (with azure cache)
media_store = f"{nspawn_store}/resource/media"
# location of images produced by this tool (with azure cache)
image_store = f"{nspawn_store}/resource/image"
#
# container definitions
#
# image base for unit test
base_machine = "arch-base"
base_image_path = f"{image_store}/{base_machine}/default.tar.gz"
base_image_url = f"file://localhost/{base_image_path}"
# unit test: cryptsetup
cryptsetup_machine = "test-cryptsetup"
cryptsetup_image_path = f"{image_store}/{cryptsetup_machine}/default.tar.gz"
cryptsetup_image_url = f"file://localhost/{cryptsetup_image_path}"
# unit test: dropbear
dropbear_machine = "test-dropbear"
dropbear_image_path = f"{image_store}/{dropbear_machine}/default.tar.gz"
dropbear_image_url = f"file://localhost/{dropbear_image_path}"
# unit test: tinysshd
tinysshd_machine = "test-tinysshd"
tinysshd_image_path = f"{image_store}/{tinysshd_machine}/default.tar.gz"
tinysshd_image_url = f"file://localhost/{tinysshd_image_path}"
# unit test: nftables
nftables_machine = "test-nftables"
nftables_image_path = f"{image_store}/{nftables_machine}/default.tar.gz"
nftables_image_url = f"file://localhost/{nftables_image_path}"
# unit test: anything else
unitada_machine = "test-unitada"
unitada_image_path = f"{image_store}/{unitada_machine}/default.tar.gz"
unitada_image_url = f"file://localhost/{unitada_image_path}"
|
[
"[email protected]"
] | |
9b66a0e02508ee60fbd50d1de623179d0ef4f34b
|
9cec93a18ea94504947820205d0faae4d67ecd8d
|
/H2TauTau/python/eventContent/common_cff.py
|
cd998b53c2a17602f3659cbf83da20caa949c0ea
|
[] |
no_license
|
DESY-CMS-SUS/cmgtools-lite
|
de88b1d5dc20a925ed5b7c7be69fa3ef677955c6
|
db52d50047178563a0eb7f5858ae100aa408ec68
|
refs/heads/8_0_25
| 2021-05-23T04:36:22.900460 | 2017-11-09T10:32:41 | 2017-11-09T10:32:41 | 60,184,794 | 3 | 9 | null | 2021-02-17T23:22:12 | 2016-06-01T14:37:18 |
Python
|
UTF-8
|
Python
| false | false | 1,693 |
py
|
import copy
common = [
# 'drop *',
'keep double_fixedGridRho*_*_*',
'keep edmTriggerResults_TriggerResults_*_*',
'keep patPackedTriggerPrescales_*_*_*',
'keep patElectrons_slimmedElectrons_*_*',
'keep patJets_slimmedJets_*_*',
'keep patJets_patJetsReapplyJEC_*_*',
'keep patMETs_slimmedMETs_*_*',
'keep patMuons_slimmedMuons_*_*',
# 'keep patPacked*_*_*_*',
'keep patPackedCandidate*_*packedPFCandidates*_*_*', # RIC: agreed to keep it to: 1. tau vtx 2. possibly compute isolations at analysis level
'keep patTaus_slimmedTaus_*_*',
'keep patTrigger*_*_*_*',
'keep recoVertexs_*_*_*',
'keep cmgMETSignificances_*_*_*',
'keep patCompositeCandidates_cmg*CorSVFitFullSel_*_H2TAUTAU',
'keep patJets_patJetsAK4PF_*_*',
'keep PileupSummaryInfos_*_*_*',
'keep recoGenParticles_prunedGenParticles_*_*',
'keep patPackedGenParticles_packedGenParticles__*', # these are status 1
'keep recoGsfElectronCores_*_*_*', # needed?
'keep recoSuperClusters_*_*_*', # for electron MVA ID
'keep recoGenJets_slimmedGenJets_*_*',
'keep *_slimmedSecondaryVertices_*_*',
'keep patPackedCandidates_packedPFCandidates__*',
'keep *_puppi_*_*',
'keep *_slimmedMETsPuppi_*_*',
'keep *_generator_*_*',
'keep *_genEvtWeightsCounter_*_H2TAUTAU',
'keep *_offlineBeamSpot_*_*',
'keep *_reducedEgamma_reducedConversions_*',
'keep LHEEventProduct_*_*_*',
'keep *_l1extraParticles_*_*',
# 'keep *_mvaMETTauMu_*_H2TAUTAU'
]
commonDebug = copy.deepcopy(common)
commonDebug.extend([
'keep patCompositeCandidates_*_*_*', # keep all intermediate di-taus
'keep patElectrons_*_*_*'
])
|
[
"[email protected]"
] | |
2e29fe62e754d295f00d590bcd2ce5ca0afddcf2
|
5ef2ebd4334769955cff15a03f5956b0fac6ba52
|
/docs/conf.py
|
15332c991b4e2357f380f776c35bd8c2c007fe0e
|
[
"BSD-3-Clause-Modification"
] |
permissive
|
smurfix/repoze.retry
|
b58bf0b1e96e995d8631e03d1eb51cea84bb2a3c
|
0b7eae20b2ae29180bc36a5549ae9e54e5b6a7bd
|
refs/heads/master
| 2021-01-17T08:00:17.562716 | 2013-12-03T09:16:39 | 2013-12-03T09:20:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,741 |
py
|
# -*- coding: utf-8 -*-
#
# repoze.retry documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 9 08:03:46 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
parent = os.path.dirname(os.path.dirname(__file__))
sys.path.append(os.path.abspath(parent))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'repoze.retry'
copyright = u'2010, Agendaless Consulting, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from pkg_resources import require
dist = require('repoze.retry')[0]
version = dist.version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'repoze.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'repozeretrydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'repozeretry.tex', u'repoze.retry Documentation',
u'Agendaless Consulting, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
[
"[email protected]"
] | |
cf586d21d70ab55c9b7d5a8e1e146b94a0e7493b
|
4ba18540bfd8c523fe39bbe7d6c8fa29d4ec0947
|
/atlas/foundations_events/src/test/consumers/jobs/queued/test_queued_job_notifier.py
|
d0480777c4e91860a0d9142ea598f1e067386f54
|
[
"BSD-3-Clause",
"MIT",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MPL-2.0"
] |
permissive
|
yottabytt/atlas
|
c9d8ef45a0921c9f46d3ed94d42342f11488a85e
|
b040e574fbc64c833039b003f8a90345dd98e0eb
|
refs/heads/master
| 2022-10-14T11:12:12.311137 | 2020-06-13T13:19:35 | 2020-06-13T13:19:35 | 272,008,756 | 0 | 0 |
Apache-2.0
| 2020-06-13T12:55:29 | 2020-06-13T12:55:28 | null |
UTF-8
|
Python
| false | false | 1,369 |
py
|
import unittest
from mock import Mock
from foundations_spec.helpers import *
from foundations_spec.helpers.spec import Spec
class TestQueuedJobNotifier(Spec):
job_notifier = let_mock()
@let
def consumer(self):
from foundations_events.consumers.jobs.queued.job_notifier import JobNotifier
return JobNotifier(self.job_notifier)
@let
def job_id(self):
from uuid import uuid4
return uuid4()
@let
def project_name(self):
return self.faker.sentence()
def test_call_sends_notification_with_qeueud_message(self):
time = 1551457960.22515
self.consumer.call({'job_id': self.job_id, 'project_name': self.project_name}, time, None)
self.job_notifier.send_message.assert_called_with(
"""
Job Queued
Job Id: {}
Timestamp: 2019-03-01 11:32:40.225150
Project Name: {}
""".format(self.job_id, self.project_name)
)
def test_call_sends_notification_with_qeueud_message_different_time_stamp(self):
time = 1551458381.9642663
self.consumer.call({'job_id': self.job_id, 'project_name': self.project_name}, time, None)
self.job_notifier.send_message.assert_called_with(
"""
Job Queued
Job Id: {}
Timestamp: 2019-03-01 11:39:41.964266
Project Name: {}
""".format(self.job_id, self.project_name)
)
|
[
"[email protected]"
] | |
eb8e03b50b9cf04c181f5fd214e1461f869755bb
|
beea119ff63911711c906ea82cd7c8fa0b33f149
|
/src/oscar/apps/dashboard/catalogue/mixins.py
|
b924a7dbbda53e107a9875e6c67e2c80b06ce6bc
|
[
"MIT"
] |
permissive
|
benjaminbills/fundizshop
|
6157316182ffa0045d68737315682c361f968ce9
|
b5d09f70c0a728c74efa72a37b03efef2bed9d3d
|
refs/heads/master
| 2023-06-21T19:05:08.120925 | 2021-07-13T05:00:15 | 2021-07-13T05:00:15 | 379,179,978 | 1 | 4 |
MIT
| 2021-07-07T09:18:29 | 2021-06-22T07:27:11 |
Python
|
UTF-8
|
Python
| false | false | 529 |
py
|
class PartnerProductFilterMixin:
def filter_queryset(self, queryset):
"""
Restrict the queryset to products the given user has access to.
A staff user is allowed to access all Products.
A non-staff user is only allowed access to a product if they are in at
least one stock record's partner user list.
"""
user = self.request.user
if user.is_staff:
return queryset
return queryset.filter(stockrecords__partner__users__pk=user.pk).distinct()
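# Illustrative use in a dashboard view (class names are assumptions, not from
# this file):
#   class ProductListView(PartnerProductFilterMixin, generic.ListView):
#       def get_queryset(self):
#           return self.filter_queryset(Product.objects.all())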
|
[
"[email protected]"
] | |
40ff46ac61dab69c55f35bf02697ae1ce5adca82
|
88841c4b8c8a2d8c12186105510d325ff84324a5
|
/scripts/artifacts/bluetoothOther.py
|
111a4684b9a7165e034b4f586b0c34ae67aa8dad
|
[
"MIT"
] |
permissive
|
mastenp/iLEAPP
|
404b8aca6b6bc6fab04240fdccf822839bade1e1
|
ee40ef7505b36d0b9b04131f284a9d4d036514a5
|
refs/heads/master
| 2022-12-26T07:55:17.905307 | 2020-09-29T11:58:16 | 2020-09-29T11:58:16 | 272,011,420 | 1 | 0 |
MIT
| 2020-09-29T11:58:17 | 2020-06-13T13:12:05 |
Python
|
UTF-8
|
Python
| false | false | 1,198 |
py
|
import glob
import os
import sys
import stat
import pathlib
import plistlib
import sqlite3
import json
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows
def get_bluetoothOther(files_found, report_folder, seeker):
file_found = str(files_found[0])
os.chmod(file_found, 0o0777)
db = sqlite3.connect(file_found)
cursor = db.cursor()
cursor.execute(
"""
SELECT
Name,
Address,
LastSeenTime,
Uuid
FROM
OtherDevices
order by Name desc
""")
all_rows = cursor.fetchall()
usageentries = len(all_rows)
data_list = []
if usageentries > 0:
for row in all_rows:
data_list.append((row[0], row[1], row[3]))
description = ''
report = ArtifactHtmlReport('Bluetooth Other LE')
report.start_artifact_report(report_folder, 'Other LE', description)
report.add_script()
data_headers = ('Name','Address','UUID' )
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'Bluetooth Other LE'
tsv(report_folder, data_headers, data_list, tsvname)
else:
logfunc('No data available for Bluetooth Other')
db.close()
return
|
[
"[email protected]"
] | |
d7f81f1b456b403d5cfaa53ae22d927009042b7d
|
8a73cde463081afd76427d5af1e6837bfa51cc47
|
/service/surf/apps/filters/metadata.py
|
49cdddb85d42d447a04530c4871e5959605a61e7
|
[
"MIT"
] |
permissive
|
surfedushare/search-portal
|
8af4103ec6464e255c5462c672b30f32cd70b4e1
|
63e30ad0399c193fcb686804062cedf3930a093c
|
refs/heads/acceptance
| 2023-06-25T13:19:41.051801 | 2023-06-06T13:37:01 | 2023-06-06T13:37:01 | 254,373,874 | 2 | 1 |
MIT
| 2023-06-06T12:04:44 | 2020-04-09T13:07:12 |
Python
|
UTF-8
|
Python
| false | false | 2,079 |
py
|
from collections import defaultdict
import requests
from django.conf import settings
from django.utils.functional import cached_property
class MetadataTree(object):
harvester_url = None
api_token = None
def __init__(self, harvester_url, api_token, warm_up_cache=False):
self.harvester_url = harvester_url
self.api_token = api_token
if warm_up_cache:
self._warm_up_cache = self.translations # result should be ignored as it only fills the cache
def _fetch(self, url):
response = requests.get(url, headers={"Authorization": f"Token {self.api_token}"})
if response.status_code != requests.status_codes.codes.ok:
raise ValueError(f"Failed request: {response.status_code}")
return response.json()
@cached_property
def tree(self):
return self._fetch(f"{self.harvester_url}metadata/tree/?site_id={settings.SITE_ID}")
@cached_property
def partial_tree(self):
return self._fetch(f"{self.harvester_url}metadata/tree/?site_id={settings.SITE_ID}&max_children=20")
@cached_property
def cache(self):
cache = defaultdict(dict)
def _cache_children(field_name, children):
for child in children:
cache[field_name][child["value"]] = child
_cache_children(field_name, child["children"])
for field in self.tree:
field_name = field["value"]
cache[field_name]["_field"] = field
_cache_children(field_name, field["children"])
return cache
@cached_property
def translations(self):
return {
field["value"]: {
value: child["translation"]
for value, child in self.cache[field["value"]].items()
}
for field in self.tree
}
def get_field(self, field_name):
return self.cache[field_name]["_field"]
def get_filter_field_names(self):
return [
field["value"]
for field in self.tree if not field["is_manual"]
]
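# Minimal usage sketch (hypothetical URL and token; requires a reachable
# harvester endpoint):
#   tree = MetadataTree("https://harvester.example.org/", "secret-token")
#   fields = tree.get_filter_field_names()
#   labels = tree.translations  # nested dict: field -> value -> translation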
|
[
"[email protected]"
] | |
3ea01b02f1d2247da82de5a03180b328416801ca
|
1a80c38ea020a8b18bb2c61b55caff8a38f553b9
|
/SWEA/trying/6058.py
|
647d2ed1e2f5a17a5950b75fb675a9047cc7a63c
|
[] |
no_license
|
jiwookseo/problem-solving
|
775a47825dc73f8a29616ef7011e8ee7be346f80
|
eefbefb21608ae0a2b3c75c010ae14995b7fc646
|
refs/heads/master
| 2020-04-19T03:11:02.659816 | 2019-08-14T08:59:06 | 2019-08-14T08:59:06 | 167,926,883 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 412 |
py
|
# 6058. New Year Celebration Party D5
# https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AWbHe_w6AHIDFAV0&categoryId=AWbHe_w6AHIDFAV0&categoryType=CODE
def gen():
a=0
for i in range(1,401):
a+=i
yield a
for tc in range(1,int(input())+1):
inp=list(map(int,input().split()))
b,l,k=3*inp[0],inp[1],inp[2]
genN=gen()
    # Too hard. A graph? A tree?
|
[
"[email protected]"
] | |
831d02112e7ceee36ced42de31fb8ccfb82dfac5
|
2247218311804a8e6ca0d07840ab918cd749376d
|
/tests/func/test_glr_parsing.py
|
00ca40063da92a24e7c3ae3f8c0c690526bf186a
|
[
"Python-2.0",
"MIT"
] |
permissive
|
morganjk/parglare
|
e48067ee99ead524d9bb311be80bc173e50ef5d2
|
39b8034d50d328844dfd91fcd94e23166056398f
|
refs/heads/master
| 2021-07-18T09:44:39.758588 | 2017-10-22T15:08:56 | 2017-10-22T15:08:56 | 107,910,367 | 1 | 0 | null | 2017-10-22T23:17:58 | 2017-10-22T23:17:58 | null |
UTF-8
|
Python
| false | false | 5,101 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from parglare import GLRParser, Grammar, Parser
from parglare.exceptions import SRConflicts
def test_lr2_grammar():
grammar = """
Model: Prods EOF;
Prods: Prod | Prods Prod;
Prod: ID "=" ProdRefs;
ProdRefs: ID | ProdRefs ID;
ID: /\w+/;
"""
g = Grammar.from_string(grammar)
# This grammar is not LR(1) as it requires
# at least two tokens of lookahead to decide
# what to do on each ID from the right side.
    # If '=' is after ID then it should reduce "Prod"
# else it should reduce ID as ProdRefs.
with pytest.raises(SRConflicts):
Parser(g)
# But it can be parsed unambiguously by GLR.
p = GLRParser(g)
txt = """
First = One Two three
Second = Foo Bar
Third = Baz
"""
results = p.parse(txt)
assert len(results) == 1
def test_expressions():
actions = {
"E": [
lambda _, nodes: nodes[0] + nodes[2],
lambda _, nodes: nodes[0] * nodes[2],
lambda _, nodes: nodes[1],
lambda _, nodes: int(nodes[0])
]
}
# This grammar is highly ambiguous if priorities and
# associativities are not defined to disambiguate.
grammar = """
E: E "+" E | E "*" E | "(" E ")" | /\d+/;
"""
g = Grammar.from_string(grammar)
p = GLRParser(g, actions=actions, debug=True)
# Even this simple expression has 2 different interpretations
# (4 + 2) * 3 and
# 4 + (2 * 3)
results = p.parse("4 + 2 * 3")
assert len(results) == 2
assert 18 in results and 10 in results
    # Adding one more operand raises the number of interpretations to 5
results = p.parse("4 + 2 * 3 + 8")
assert len(results) == 5
# One more and there are 14 interpretations
results = p.parse("4 + 2 * 3 + 8 * 5")
assert len(results) == 14
    # The number of interpretations is the Catalan number C(n),
    # where n is the number of operations.
    # https://en.wikipedia.org/wiki/Catalan_number
    # This number rises very fast: for 10 operations the number of
    # interpretations is already 16796!
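    # Illustrative aside (not part of the original test): the Catalan number
    # C(n) can be computed iteratively and matches the counts asserted above.
    def catalan(n):
        c = 1
        for i in range(n):
            c = c * 2 * (2 * i + 1) // (i + 2)
        return c
    assert [catalan(n) for n in (2, 3, 4, 10)] == [2, 5, 14, 16796]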
    # If we raise the priority of the multiplication operation we reduce
    # ambiguity. The default production priority is 10. Here we raise it to
    # 15 for multiplication.
grammar = """
E: E "+" E | E "*" E {15}| "(" E ")" | /\d+/;
"""
g = Grammar.from_string(grammar)
p = GLRParser(g, actions=actions)
    # This expression now has 2 interpretations:
    # (4 + (2*3)) + 8
    # 4 + ((2*3) + 8)
    # This is because the associativity of the + operation is not defined.
results = p.parse("4 + 2 * 3 + 8")
assert len(results) == 2
# If we define associativity for both + and * we have resolved all
# ambiguities in the grammar.
grammar = """
E: E "+" E {left}| E "*" E {left, 15}| "(" E ")" | /\d+/;
"""
g = Grammar.from_string(grammar)
p = GLRParser(g, actions=actions)
results = p.parse("4 + 2 * 3 + 8 * 5 * 3")
assert len(results) == 1
assert results[0] == 4 + 2 * 3 + 8 * 5 * 3
def test_epsilon_grammar():
grammar = """
Model: Prods EOF;
Prods: Prod | Prods Prod | EMPTY;
Prod: ID "=" ProdRefs;
ProdRefs: ID | ProdRefs ID;
ID: /\w+/;
"""
g = Grammar.from_string(grammar)
p = GLRParser(g, debug=True)
txt = """
First = One Two three
Second = Foo Bar
Third = Baz
"""
results = p.parse(txt)
assert len(results) == 1
results = p.parse("")
assert len(results) == 1
def test_non_eof_grammar_nonempty():
"""
Grammar that is not anchored by EOF at the end might
    result in multiple trees that are produced by successful
parses of the incomplete input.
"""
grammar_nonempty = """
Model: Prods;
Prods: Prod | Prods Prod;
Prod: ID "=" ProdRefs;
ProdRefs: ID | ProdRefs ID;
ID: /\w+/;
"""
g_nonempty = Grammar.from_string(grammar_nonempty)
txt = """
First = One Two three
Second = Foo Bar
Third = Baz
"""
p = GLRParser(g_nonempty, debug=True)
results = p.parse(txt)
    # There are three successful parses.
    # E.g. one would be the production 'First = One Two three Second' and the
    # parser could not continue as the next token is '=', but it succeeds as
    # we haven't terminated our model with EOF, so we allow partial parses.
assert len(results) == 3
def test_non_eof_grammar_empty():
"""
Grammar that is not anchored by EOF at the end might
    result in multiple trees that are produced by successful
parses of the incomplete input.
"""
grammar_empty = """
Model: Prods;
Prods: Prod | Prods Prod | EMPTY;
Prod: ID "=" ProdRefs;
ProdRefs: ID | ProdRefs ID;
ID: /\w+/;
"""
g_empty = Grammar.from_string(grammar_empty)
txt = """
First = One Two three
Second = Foo Bar
Third = Baz
"""
p = GLRParser(g_empty, debug=True)
results = p.parse(txt)
assert len(results) == 3
results = p.parse("")
assert len(results) == 1
|
[
"[email protected]"
] | |
a083f7ad8469cb81abd1209db8dd1c48afc39efa
|
52d9f9fb6348c51e14c470e4405a79204ea98980
|
/unit5_Auto_Test_Model/test5_3模块化与参数化/read_json.py
|
cf0bae59ca450578251cece4a4810693c6a6ce5f
|
[] |
no_license
|
UULIN/automation_test
|
0a815d9b1032d8d393e4f340bd294f17e20d194d
|
5a193bb897d653a44e0376bcb6592d7b1811a424
|
refs/heads/master
| 2023-01-05T15:07:54.133329 | 2020-11-02T04:58:11 | 2020-11-02T04:58:11 | 280,456,044 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 184 |
py
|
"""
Read JSON
"""
import json
import os
path = os.path.abspath("config")
with open(path+"\\user_info.json", "r") as f:
data = f.read()
userlist = json.loads(data)
print(userlist)
|
[
"[email protected]"
] | |
7a28ea1553bdc4de0d169a474e8f80f39f34399a
|
bf66a28310d934fd1b8b14f762b336b9ec893a04
|
/spaces/management/commands/seed_detail_spaces.py
|
f9b9f8cc4f792d7c46b1fa631cb36906e2b90db2
|
[] |
no_license
|
jeongmin14/enter_cloud-backend
|
0664a6563ea090926a8522b454b762afed1f5c9d
|
789b358e8c3cf5be8505185c048e10556bfd9e0a
|
refs/heads/main
| 2023-03-26T03:54:39.565076 | 2021-03-17T13:09:32 | 2021-03-17T13:09:32 | 348,644,724 | 0 | 0 | null | 2021-03-17T13:08:07 | 2021-03-17T09:09:14 | null |
UTF-8
|
Python
| false | false | 3,291 |
py
|
import csv
import random
import bcrypt
from django_seed import Seed
from faker import Faker
from django.contrib.admin.utils import flatten
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.db.models import Count
from users.models import Host
from spaces.models import (Space,
Tag,
Facility,
BreakDay,
Type,
SubImage,
SpaceFacility,
SpaceTag,
SpaceBreakday,
DetailSpace,
DetailType,
DetailFacility)
from my_settings import (name_list,
simple_info_list,
main_info_list,
detail_name_list,
detail_facility_list)
class Command(BaseCommand):
help = "create detail spaces"
def add_arguments(self, parser):
parser.add_argument("--number", type=int, default=1)
def handle(self, *args, **options):
number = options.get("number")
spaces = Space.objects.all()
file = open("all.csv", mode="r")
reader = file.readlines()
image_length = len(reader)
detail_types = DetailType.objects.all()
detail_facilities = DetailFacility.objects.all()
seeder = Seed.seeder()
seeder.add_entity(
DetailSpace,
number,
{
"space": lambda x : random.choice(spaces) if spaces.aggregate(Count("detailspace"))["detailspace__count"] < 3 else random.choice(spaces),
"name": lambda x : random.choice(detail_name_list),
"information": lambda x : random.choice(main_info_list),
"image": lambda x : reader[random.randint(0, image_length-1)],
"min_reservation_time": lambda x : random.randint(2, 5),
"min_people": lambda x : random.randint(1, 2),
"max_people": lambda x : random.randint(4, 10),
"price": lambda x : random.randint(5, 40) * 1000
}
)
seed_detail_space = seeder.execute()
detail_space_id_list = flatten(seed_detail_space.values())
for detail_space_id in detail_space_id_list:
detail_space = DetailSpace.objects.get(id = detail_space_id)
random_number = random.randint(1, len(detail_types))
detail_type_list = detail_types[random_number:random_number + 2]
detail_space.detailtype_set.set(detail_type_list)
random_number = random.randint(1, len(detail_facilities))
detail_facility_list = detail_facilities[random_number:random_number + 6]
detail_space.detailfacility_set.set(detail_facility_list)
self.stdout.write(self.style.SUCCESS(f'spaces created {number}'))
|
[
"[email protected]"
] | |
0285552898e2f3c4ef99eed1b5f9c01d312dad95
|
5c41836471e08252f11bc51ce54e4d449f6a4f88
|
/cajas/migrations/0004_auto_20180807_1115.py
|
b0877a004b33455f8b8122267004dd4dcc2bc3f3
|
[] |
no_license
|
wahello/dr_amor_app
|
1e9be88681d94bda5425d006a769fd3c55edbadb
|
3604f7992df97e981f6f72fd844b58186ebed6ef
|
refs/heads/master
| 2020-04-27T10:46:46.453354 | 2019-02-25T04:50:34 | 2019-02-25T04:50:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 394 |
py
|
# Generated by Django 2.0.2 on 2018-08-07 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cajas', '0003_auto_20180801_2003'),
]
operations = [
migrations.AlterField(
model_name='billetemoneda',
name='activo',
field=models.BooleanField(default=False),
),
]
|
[
"[email protected]"
] | |
fa7186feee14a377eaf7cacdfd7aafc2db7bccca
|
6518c74441a68fc99b2b08423b5ea11480806499
|
/tests/tracking/test_artifact_utils.py
|
42df335dbdc72043cd5ea533d5a16b1c13f91b18
|
[
"Apache-2.0"
] |
permissive
|
criteo-forks/mlflow
|
da58e64d09700623810da63999a1aca81b435b90
|
499284d8dc9e9ec79d8d9dbd03c58d162a2b7eaa
|
refs/heads/master
| 2023-04-14T17:59:29.997458 | 2022-01-11T09:50:26 | 2022-01-11T09:50:26 | 191,391,769 | 5 | 4 |
Apache-2.0
| 2023-04-07T15:16:20 | 2019-06-11T14:44:00 |
Python
|
UTF-8
|
Python
| false | false | 5,304 |
py
|
import os
from unittest import mock
from unittest.mock import ANY
import mlflow
from mlflow.tracking.artifact_utils import (
_download_artifact_from_uri,
_upload_artifacts_to_databricks,
)
def test_artifact_can_be_downloaded_from_absolute_uri_successfully(tmpdir):
artifact_file_name = "artifact.txt"
artifact_text = "Sample artifact text"
local_artifact_path = tmpdir.join(artifact_file_name).strpath
with open(local_artifact_path, "w") as out:
out.write(artifact_text)
logged_artifact_path = "artifact"
with mlflow.start_run():
mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_path)
artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_path)
downloaded_artifact_path = os.path.join(
_download_artifact_from_uri(artifact_uri), artifact_file_name
)
assert downloaded_artifact_path != local_artifact_path
assert downloaded_artifact_path != logged_artifact_path
with open(downloaded_artifact_path, "r") as f:
assert f.read() == artifact_text
def test_download_artifact_from_absolute_uri_persists_data_to_specified_output_directory(tmpdir):
artifact_file_name = "artifact.txt"
artifact_text = "Sample artifact text"
local_artifact_path = tmpdir.join(artifact_file_name).strpath
with open(local_artifact_path, "w") as out:
out.write(artifact_text)
logged_artifact_subdir = "logged_artifact"
with mlflow.start_run():
mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_subdir)
artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_subdir)
artifact_output_path = tmpdir.join("artifact_output").strpath
os.makedirs(artifact_output_path)
_download_artifact_from_uri(artifact_uri=artifact_uri, output_path=artifact_output_path)
assert logged_artifact_subdir in os.listdir(artifact_output_path)
assert artifact_file_name in os.listdir(
os.path.join(artifact_output_path, logged_artifact_subdir)
)
with open(
os.path.join(artifact_output_path, logged_artifact_subdir, artifact_file_name), "r"
) as f:
assert f.read() == artifact_text
def test_download_artifact_with_special_characters_in_file_name_and_path(tmpdir):
artifact_file_name = " artifact_ with! special characters.txt"
artifact_sub_dir = " path with ! special characters"
artifact_text = "Sample artifact text"
local_sub_path = os.path.join(tmpdir, artifact_sub_dir)
os.makedirs(local_sub_path)
local_artifact_path = os.path.join(local_sub_path, artifact_file_name)
with open(local_artifact_path, "w") as out:
out.write(artifact_text)
logged_artifact_subdir = "logged_artifact"
with mlflow.start_run():
mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_subdir)
artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_subdir)
artifact_output_path = os.path.join(tmpdir, "artifact output path!")
os.makedirs(artifact_output_path)
_download_artifact_from_uri(artifact_uri=artifact_uri, output_path=artifact_output_path)
assert logged_artifact_subdir in os.listdir(artifact_output_path)
assert artifact_file_name in os.listdir(
os.path.join(artifact_output_path, logged_artifact_subdir)
)
with open(
os.path.join(artifact_output_path, logged_artifact_subdir, artifact_file_name), "r"
) as f:
assert f.read() == artifact_text
def test_upload_artifacts_to_databricks():
import_root = "mlflow.tracking.artifact_utils"
with mock.patch(import_root + "._download_artifact_from_uri") as download_mock, mock.patch(
import_root + ".DbfsRestArtifactRepository"
) as repo_mock:
new_source = _upload_artifacts_to_databricks(
"dbfs:/original/sourcedir/",
"runid12345",
"databricks://tracking",
"databricks://registry:ws",
)
download_mock.assert_called_once_with("dbfs://tracking@databricks/original/sourcedir/", ANY)
repo_mock.assert_called_once_with(
"dbfs://registry:ws@databricks/databricks/mlflow/tmp-external-source/"
)
assert new_source == "dbfs:/databricks/mlflow/tmp-external-source/runid12345/sourcedir"
def test_upload_artifacts_to_databricks_no_run_id():
from uuid import UUID
import_root = "mlflow.tracking.artifact_utils"
with mock.patch(import_root + "._download_artifact_from_uri") as download_mock, mock.patch(
import_root + ".DbfsRestArtifactRepository"
) as repo_mock, mock.patch("uuid.uuid4", return_value=UUID("4f746cdcc0374da2808917e81bb53323")):
new_source = _upload_artifacts_to_databricks(
"dbfs:/original/sourcedir/", None, "databricks://tracking:ws", "databricks://registry"
)
download_mock.assert_called_once_with(
"dbfs://tracking:ws@databricks/original/sourcedir/", ANY
)
repo_mock.assert_called_once_with(
"dbfs://registry@databricks/databricks/mlflow/tmp-external-source/"
)
assert (
new_source == "dbfs:/databricks/mlflow/tmp-external-source/"
"4f746cdcc0374da2808917e81bb53323/sourcedir"
)
|
[
"[email protected]"
] | |
df611eb84cc4a732b0e4702ae54d667788b7c362
|
9eef3e4cf39a659268694cf08a4a799af8fb13e2
|
/inference/surrogate/nn_surrogate/learning/training.py
|
313ed4ae3a7b98e3561490f4cf3964ce074c4d2c
|
[] |
no_license
|
cselab/tRBC-UQ
|
c30ec370939b949c989d2e9cd30137073b53e7d2
|
cd7711b76c76e86bc6382914111f4fa42aa78f2c
|
refs/heads/master
| 2023-04-18T03:06:49.175259 | 2022-10-25T15:45:07 | 2022-10-25T15:45:07 | 483,407,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,563 |
py
|
#!/usr/bin/env python
from copy import deepcopy
import torch
from torch.utils.data import DataLoader
def train_model(model,
data_loader: DataLoader,
x_valid: torch.tensor,
y_valid: torch.tensor,
criterion = torch.nn.MSELoss(),
lr=0.1,
max_epoch=5000,
info_every=100,
device=torch.device('cpu')):
model.to(device)
x_valid, y_valid = x_valid.to(device), y_valid.to(device)
with torch.no_grad():
model_bck = deepcopy(model)
best_valid_loss = 1e20
num_worse_valid_losses = 0
patience = 10
max_number_of_rounds = 5
number_of_rounds = 0
lr_reduction_factor = 0.1
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# trace
train_losses = list()
valid_losses = list()
for epoch in range(max_epoch):
y_pred_valid = model(x_valid)
with torch.no_grad():
valid_loss = criterion(y_pred_valid, y_valid)
if valid_loss.item() < best_valid_loss:
best_valid_loss = valid_loss.item()
num_worse_valid_losses = 0
model_bck.load_state_dict(model.state_dict())
else:
num_worse_valid_losses += 1
if num_worse_valid_losses > patience:
model.load_state_dict(model_bck.state_dict())
num_worse_valid_losses = 0
number_of_rounds += 1
lr *= lr_reduction_factor
print(f"reduced lr from {lr/lr_reduction_factor:e} to {lr:e}")
# set the learning rate
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if number_of_rounds >= max_number_of_rounds:
break
train_loss = 0.0
for x_batch, y_batch in data_loader:
x_batch, y_batch = x_batch.to(device), y_batch.to(device)
y_pred = model(x_batch)
batch_loss = criterion(y_pred, y_batch)
optimizer.zero_grad()
batch_loss.backward()
if epoch > 0:
optimizer.step()
train_loss += batch_loss.cpu().detach().numpy()
nbatches = len(data_loader)
train_loss /= nbatches
if epoch % info_every == 0:
print(f"epoch {epoch:05d}: training loss {train_loss.item():e}, valid loss {valid_loss.item():e}")
# save trace
train_losses.append(train_loss.item())
valid_losses.append(valid_loss.item())
return model, train_losses, valid_losses
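# Minimal usage sketch (all names hypothetical; shapes must match the model):
#   loader = DataLoader(TensorDataset(x_train, y_train), batch_size=32, shuffle=True)
#   model, train_hist, valid_hist = train_model(MyNet(), loader, x_valid, y_valid, lr=1e-2)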
|
[
"[email protected]"
] | |
4d708ea3bd225f0c0c2dc484df2259a106ba8471
|
f0fe4f17b5bbc374656be95c5b02ba7dd8e7ec6d
|
/all_functions/linux server/python GUI/menus/popupMenu.py
|
1f03b6eadd92ccda984447a8fd4cfb671036029d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
Heroku-elasa/heroku-buildpack-python-ieee-new
|
f46a909ebc524da07f8e15c70145d1fe3dbc649b
|
06ec2fda04d9e478ed2506400e460489b0ca91ab
|
refs/heads/master
| 2022-12-10T13:14:40.742661 | 2020-01-29T14:14:10 | 2020-01-29T14:14:10 | 60,902,385 | 0 | 0 |
MIT
| 2022-12-07T23:34:36 | 2016-06-11T10:36:10 |
Python
|
UTF-8
|
Python
| false | false | 2,177 |
py
|
# submenu.py
import wx
########################################################################
class MyForm(wx.Frame):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Frame.__init__(self, None, title="Popup Menu Tutorial")
panel = wx.Panel(self, wx.ID_ANY)
lbl = wx.StaticText(panel, label="Right click anywhere!")
self.Bind(wx.EVT_CONTEXT_MENU, self.onContext)
#----------------------------------------------------------------------
def onContext(self, event):
"""
Create and show a Context Menu
"""
# only do this part the first time so the events are only bound once
if not hasattr(self, "popupID1"):
self.popupID1 = wx.NewId()
self.itemTwoId = wx.NewId()
self.itemThreeId = wx.NewId()
self.Bind(wx.EVT_MENU, self.onPopup, id=self.popupID1)
self.Bind(wx.EVT_MENU, self.onPopup, id=self.itemTwoId)
self.Bind(wx.EVT_MENU, self.onExit, id=self.itemThreeId)
# build the menu
menu = wx.Menu()
itemOne = menu.Append(self.popupID1, "ItemOne")
itemTwo = menu.Append(self.itemTwoId, "ItemTwo")
itemThree = menu.Append(self.itemThreeId, "Exit")
# show the popup menu
self.PopupMenu(menu)
menu.Destroy()
#----------------------------------------------------------------------
def onExit(self, event):
"""
Exit program
"""
self.Close()
#----------------------------------------------------------------------
def onPopup(self, event):
"""
Print the label of the menu item selected
"""
itemId = event.GetId()
menu = event.GetEventObject()
menuItem = menu.FindItemById(itemId)
print menuItem.GetLabel()
#----------------------------------------------------------------------
# Run the program
if __name__ == "__main__":
app = wx.App(False)
frame = MyForm().Show()
app.MainLoop()
|
[
"[email protected]"
] | |
3f87c89ac30e6067d6d60050f99e7ddc4417d01a
|
0435b6282cfc7cb27c5c5f2d7d2bcbf160ca8d7d
|
/Project_2/linear.py
|
7c9b6e7ce2bec81bd7b80dcd64aeeecc566bf256
|
[] |
no_license
|
zx-joe/EPFL-Deep-Learning
|
39d97b1d02c2d4b5fdee471ffe41ce06328e2f9a
|
8d2b1aa94608e6cdc2dcc60fa6c5f4e3b7e69e36
|
refs/heads/master
| 2022-12-11T11:11:07.960851 | 2020-09-08T09:03:21 | 2020-09-08T09:03:21 | 190,944,017 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,972 |
py
|
import torch
import math
from torch import Tensor, FloatTensor
import matplotlib.pyplot as plt
from module import Module
class Linear (Module) :
# one fully-connected layer
def __init__(self, in_dim, out_dim, eps=1., method='xavier'):
self.in_dim=in_dim
self.out_dim=out_dim
# define weight, bias and their gradient
self.w=FloatTensor(out_dim, in_dim)
self.dw=FloatTensor(out_dim, in_dim)
self.b=FloatTensor(out_dim)
self.db=FloatTensor(out_dim)
# initialization: defaulted as Xavier
if method=='zero':
self.w=self.w.fill_(0)
            self.b=self.b.fill_(0)
elif method=='normal':
self.w=self.w.normal_(mean=0,std=eps)
            self.b=self.b.normal_(mean=0,std=eps)
else:
temp_std=1./math.sqrt((self.in_dim + self.out_dim)/2)
self.w=self.w.normal_(mean=0,std=temp_std)
self.b=self.b.normal_(mean=0,std=temp_std)
# zero gradient intialization
self.dw=self.dw.zero_()
self.db=self.db.zero_()
def forward( self ,x ):
# y = w * x + b
self.input=x.clone()
self.output=self.w.matmul(self.input)+self.b
#self.output=self.w @ self.input + self.b
return self.output
def backward( self , gradwrtoutput ):
temp_wt=self.w.t()
# dw = dL/dy * x
temp_dw=gradwrtoutput.view(-1,1).mm(self.input.view(1,-1))
self.dw.add_(temp_dw)
# db = dL/dy
temp_db=gradwrtoutput.clone()
self.db.add_(temp_db)
# dx = w.T * dL/dy
temp_dx=temp_wt.matmul(gradwrtoutput)
return temp_dx
def param(self ) :
return [ self.w, self.dw , self.b, self.db]
def zero_grad(self):
self.dw.zero_()
self.db.zero_()
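# Quick smoke test (illustrative only, not part of the original module).
if __name__ == "__main__":
    layer = Linear(3, 2)
    out = layer.forward(FloatTensor(3).normal_())       # y = w x + b -> 2 values
    grad_in = layer.backward(FloatTensor(2).fill_(1.))  # dL/dx -> 3 values
    print(out.size(), grad_in.size())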
|
[
"[email protected]"
] | |
cc29d5398e6d41ed19ef958c63351c898a368bb6
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/boto-2.49.0/boto/configservice/exceptions.py
|
58aa550f9cd9b165a42667b8d7135033fda20174
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 |
Apache-2.0
| 2023-09-14T03:05:41 | 2010-06-21T19:46:51 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,528 |
py
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InvalidLimitException(BotoServerError):
pass
class NoSuchBucketException(BotoServerError):
pass
class InvalidSNSTopicARNException(BotoServerError):
pass
class ResourceNotDiscoveredException(BotoServerError):
pass
class MaxNumberOfDeliveryChannelsExceededException(BotoServerError):
pass
class LastDeliveryChannelDeleteFailedException(BotoServerError):
pass
class InsufficientDeliveryPolicyException(BotoServerError):
pass
class InvalidRoleException(BotoServerError):
pass
class InvalidTimeRangeException(BotoServerError):
pass
class NoSuchDeliveryChannelException(BotoServerError):
pass
class NoSuchConfigurationRecorderException(BotoServerError):
pass
class InvalidS3KeyPrefixException(BotoServerError):
pass
class InvalidDeliveryChannelNameException(BotoServerError):
pass
class NoRunningConfigurationRecorderException(BotoServerError):
pass
class ValidationException(BotoServerError):
pass
class NoAvailableConfigurationRecorderException(BotoServerError):
pass
class InvalidNextTokenException(BotoServerError):
pass
class InvalidConfigurationRecorderNameException(BotoServerError):
pass
class NoAvailableDeliveryChannelException(BotoServerError):
pass
class MaxNumberOfConfigurationRecordersExceededException(BotoServerError):
pass
|
[
"[email protected]"
] | |
232ac4debdccb67b46d0441af9c4ba867812edf9
|
3539d0e3ddd7849a14876e95f0332428ec28ebf7
|
/Data Scientist Career Path/11. Foundations of Machine Learning Supervised Learning/4. Classification/1. KNN/7. classify.py
|
9009fe68fc2f9cd150796ea094d743cfcc322a16
|
[
"MIT"
] |
permissive
|
DincerDogan/Data-Science-Learning-Path
|
ff146de2cf4ebc5fedfa9377babf959208dfe7e6
|
2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb
|
refs/heads/main
| 2023-05-08T10:53:47.449974 | 2021-06-06T21:27:31 | 2021-06-06T21:27:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,141 |
py
|
from movies import movie_dataset, movie_labels, normalize_point
def distance(movie1, movie2):
squared_difference = 0
for i in range(len(movie1)):
squared_difference += (movie1[i] - movie2[i]) ** 2
final_distance = squared_difference ** 0.5
return final_distance
def classify(unknown, dataset, labels, k):
distances = []
#Looping through all points in the dataset
for title in dataset:
movie = dataset[title]
distance_to_point = distance(movie, unknown)
#Adding the distance and point associated with that distance
distances.append([distance_to_point, title])
distances.sort()
#Taking only the k closest points
neighbors = distances[0:k]
num_good = 0
num_bad = 0
for neighbor in neighbors:
title = neighbor[1]
if labels[title] == 0:
num_bad += 1
elif labels[title] == 1:
num_good += 1
if num_good > num_bad:
return 1
else:
return 0
print("Call Me By Your Name" in movie_dataset)
my_movie = [3500000, 132, 2017]
normalized_my_movie = normalize_point(my_movie)
print(normalized_my_movie)
print(classify(normalized_my_movie, movie_dataset, movie_labels, 5))
|
[
"[email protected]"
] | |
aa73120bf09c8c6af195213100bedac6b5e7e642
|
54934cfe32ce5aa5c2e718b0c5c2afa4b458fe75
|
/25ch/graph.py
|
617c1d6ad6e8616a8e8c0e6ea2ca8c171b8b289c
|
[] |
no_license
|
mccarvik/intro_to_algorithms
|
46d0ecd20cc93445e0073eb0041d481a29322e82
|
c2d41706150d2bb477220b6f929510c4fc4ba30b
|
refs/heads/master
| 2021-04-12T12:25:14.083434 | 2019-11-09T05:26:28 | 2019-11-09T05:26:28 | 94,552,252 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,475 |
py
|
from random import randint
import pdb
class Graph:
"""
Graph data structure.
"""
def __init__(self, fname = None, numVertices = None, numEdges = None, weightRange = None, directed = True):
"""
Generates a weighted graph.
"""
self.adjacent = {}
self.weight = {}
if fname == None:
if any(arg == None for arg in (numVertices, numEdges, weightRange)):
numVertices, numEdges, weightRange = map(int, input("numVertices, numEdges, weightRange: ").split())
self.randomGraph(numVertices, numEdges, weightRange, directed)
else:
self.loadGraph(fname, directed)
def numVertices(self):
"""
Returns the number of vertices in the graph.
"""
return len(self.adjacent)
def vertices(self):
"""
Returns the list of vertices in the graph.
"""
return range(self.numVertices())
def edges(self):
"""
Returns a generator containing the edges in the graph.
"""
return ((fromVertex,toVertex) for fromVertex in self.vertices() for toVertex in self.adjacent[fromVertex])
def addDirectedEdge(self, fromVertex, toVertex, weight):
"""
Inserts a weighted directed edge into the graph.
"""
self.adjacent.setdefault(fromVertex, set()).add(toVertex)
self.weight[(fromVertex, toVertex)] = weight
def addUndirectedEdge(self, fromVertex, toVertex, weight):
"""
Inserts a weighted undirected edge into the graph.
"""
self.addDirectedEdge(fromVertex, toVertex, weight)
self.addDirectedEdge(toVertex, fromVertex, weight)
def randomGraph(self, numVertices, numEdges, weightRange, directed):
"""
Generates a random graph.
"""
addEdge = self.addDirectedEdge if directed else self.addUndirectedEdge
for vertex in range(numVertices):
self.adjacent[vertex] = set()
for edge in range(numEdges):
fromVertex = toVertex = None
while fromVertex == toVertex:
fromVertex = randint(0, numVertices-1)
toVertex = randint(0, numVertices-1)
weight = randint(0, weightRange)
addEdge(fromVertex, toVertex, weight)
def loadGraph(self, fname, directed):
"""
Loads a graph from a file containing a list of edges
of the form: fromVertex, toVertex, weight.
"""
addEdge = self.addDirectedEdge if directed else self.addUndirectedEdge
with open(fname, 'r') as f:
for vertex in range(int(f.readline())):
self.adjacent[vertex] = set()
for line in f.readlines():
fromVertex, toVertex, weight = map(int, line.split())
addEdge(fromVertex, toVertex, weight)
def adjacentStr(self, fromVertex):
"""
Returns a string representing the neighborhood of the
given vertex.
"""
return ", ".join(f"({toVertex}, {self.weight[(fromVertex, toVertex)]})" for toVertex in self.adjacent[fromVertex])
def __str__(self):
"""
Returns a string representing the graph.
"""
return "\n".join(f"{vertex}: {self.adjacentStr(vertex)}" for vertex in range(self.numVertices()))
def __repr__(self):
"""
Represents the graph.
"""
return str(self)
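# Example usage (illustrative; a graph file starts with the vertex count,
# followed by one "fromVertex toVertex weight" triple per line):
#   g = Graph(numVertices=5, numEdges=8, weightRange=10, directed=False)
#   print(g)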
|
[
"[email protected]"
] | |
01f71854fb9d777d10176477c617eccd675ac52c
|
a3f1e80179c23d9202d72b75dd37a49b44785f45
|
/api/client/test/test_api_catalog_upload_item.py
|
f3b1cbbfc6b43d84d22085c8e5fe119b0da41801
|
[
"Apache-2.0"
] |
permissive
|
pvaneck/mlx
|
b1e82fae5ac8aaa1dddac23aaa38c46f6e6cfc27
|
6edaa0bd77787c56b737322a0c875ae30de6cd49
|
refs/heads/main
| 2023-05-14T06:08:38.404133 | 2021-05-04T01:41:11 | 2021-05-04T01:41:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,515 |
py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1.25-related-assets
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.api_catalog_upload_item import ApiCatalogUploadItem # noqa: E501
from swagger_client.rest import ApiException
class TestApiCatalogUploadItem(unittest.TestCase):
"""ApiCatalogUploadItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testApiCatalogUploadItem(self):
"""Test ApiCatalogUploadItem"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.api_catalog_upload_item.ApiCatalogUploadItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
b75c4770199293de8d847af50386a6f6211d23b6
|
26c0f80688f75a188097a232c229a73c8e7cc6ed
|
/user/migrations/0031_alter_profile_zipcode.py
|
26fb0ef6fbd873151e7d3d517d0ac2cbcf69cd3b
|
[] |
no_license
|
creep1g/DjangoWebstore
|
8207d7ea53c478fb7e5745e1c6ae6699102b5df5
|
bd27340b86bf2289b8c14216462d932ccdf4986d
|
refs/heads/main
| 2023-05-06T09:50:04.846489 | 2021-05-28T14:40:40 | 2021-05-28T14:40:40 | 371,730,158 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
# Generated by Django 3.2 on 2021-05-12 15:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0030_merge_0024_alter_profile_card_cvc_0029_profile_city'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='zipcode',
field=models.IntegerField(),
),
]
|
[
"[email protected]"
] | |
d5b1789b3dd0c839b86dd726c2e71effbdd484ab
|
371277a2586e85337cd50a0e2889a962b89fbca0
|
/Semana 5/Subida de Archivos Flask - Portfolio/models/conocimiento.py
|
e4e67176849e22c32a4fec64a7569de0c8b0abe9
|
[] |
no_license
|
Jesuscueva/Virtual-Back-5
|
eca62561f19a3028880e3a68868ff4f1c271d579
|
2f4557a6cdae91c9fd4f22103b5bdd473845d5a4
|
refs/heads/main
| 2023-04-02T13:47:58.161874 | 2021-04-10T02:09:01 | 2021-04-10T02:09:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,142 |
py
|
from config.base_datos import bd
from sqlalchemy import Column, types
from sqlalchemy.schema import ForeignKey
class ConocimientoModel(bd.Model):
__tablename__ = 't_conocimiento'
conocimientoId = Column(
name='conocimiento_id',
type_=types.Integer,
primary_key=True,
unique=True,
autoincrement=True,
nullable=False
)
conocimientoTitulo = Column(
name='conocimiento_titulo',
type_=types.String(45),
nullable=False
)
conocimientoPuntuacion = Column(
name='conocimiento_puntuacion',
type_=types.DECIMAL(2,1),
nullable=False
)
conocimientoImagenTN = Column(
name='conocimiento_imagen_thumbnail',
type_=types.TEXT,
nullable=False
)
conocimientoImagenLarge = Column(
name='conocimiento_imagen_large',
type_=types.TEXT,
nullable=False
)
conocimientoDescripcion = Column(
name='conocimiento_descripcion',
type_=types.String(200),
nullable=False
)
# FK
categoria = Column(
ForeignKey('t_categoria.cat_id'),
name='cat_id',
type_=types.Integer,
nullable=False
)
def __init__(self, titulo, puntuacion, imagentn, imagenl, descripcion, categoria):
self.conocimientoTitulo = titulo
self.conocimientoPuntuacion = puntuacion
self.conocimientoImagenTN = imagentn
        self.conocimientoImagenLarge = imagenl
self.conocimientoDescripcion = descripcion
self.categoria = categoria
def save(self):
bd.session.add(self)
bd.session.commit()
def json(self):
return {
'conocimiento_id': self.conocimientoId,
'conocimiento_titulo': self.conocimientoTitulo,
'conocimiento_puntuacion': str(self.conocimientoPuntuacion),
'conocimiento_imagen_thumbnail': self.conocimientoImagenTN,
'conocimiento_imagen_large': self.conocimientoImagenLarge,
'conocimiento_descripcion': self.conocimientoDescripcion,
'cat_id': self.categoria,
}
|
[
"[email protected]"
] | |
6648dc83958d98e09181be589c965e5d5083dbc0
|
e2604baf3baddbbebf8597c7a3a76bac988efb41
|
/venv/bin/wsdump.py
|
6f6c90fbda8537d5bc52a71ac71732e9a1879bcb
|
[] |
no_license
|
Surajgupta5/Django-WIth-Docker
|
2fe1037451c113feba72c50d5425d4461c2f40be
|
ca879e43af043dccba6b325f89ac3c6f495dbe56
|
refs/heads/master
| 2022-07-19T18:55:48.243626 | 2020-05-11T05:53:52 | 2020-05-11T05:53:52 | 262,951,414 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,437 |
py
|
#!/home/workspace/project2/venv/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if isinstance(data, bytes) and len(data)>2 and data[:2] == b'\037\213': # gzip magick
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
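

# Hedged usage sketch (the flags are those defined in parse_args() above), e.g.:
#
#   python wsdump.py ws://echo.websocket.org/ -t "hello" -v
#
# connects to the echo server, sends "hello" once, and prints each received
# frame with its opcode until Ctrl+C.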
|
[
"[email protected]"
] | |
7a5019f032d3564ba4d7d1adff38dcfc7b6dad35
|
d109b64bfa8c80a6ec7d647beeadf9fe1c667fac
|
/class1101/LED11.py
|
ef6615d3a9cc7c3e69773a9d62dd8e9899f26d39
|
[] |
no_license
|
jumbokh/micropython_class
|
d34dd0a2be39d421d3bbf31dbb7bfd39b5f6ac6f
|
950be81582dba970e9c982e2e06fa21d9e9a0fdd
|
refs/heads/master
| 2022-10-10T22:27:02.759185 | 2022-10-01T14:44:31 | 2022-10-01T14:44:31 | 173,898,623 | 4 | 3 | null | 2020-03-31T09:57:23 | 2019-03-05T07:40:38 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 160 |
py
|
from machine import Pin
import utime
LED = None
LED = Pin(11, Pin.OUT)
while True:
LED.value(0)
utime.sleep(1)
LED.value(1)
utime.sleep(1)
|
[
"[email protected]"
] | |
225cf46464767cc87e3de5acdd111b3d2d50c482
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2594/60640/267498.py
|
06ef98d3317b58f2f24605f47cf036c4eaa392d4
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 752 |
py
|
"""
O(N)
"""
t = int(input())
for i in range(t):
inp = list(input())
set_inp = list(set(inp))
if len(set_inp) == len(inp):
print(-1)
else:
MAX_CHAR = 256
        # firstIndex records where each character was first seen; -1 means not seen yet
firstIndex = [-1 for x in range(MAX_CHAR)]
res = 0
for j in range(len(inp)):
            # position where this character was first seen (-1 if never seen)
start = firstIndex[ord(inp[j])]
            # first occurrence of the character: record its position
if start == -1:
firstIndex[ord(inp[j])] = j
            # repeated occurrence at j: j - start - 1 is the number of characters between the two
else:
res = max(res, abs(j-start-1))
print(res)
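
# Hedged worked example: for the input string "abca" the repeated 'a' occurs at
# indices 0 and 3, so res = abs(3 - 0 - 1) = 2 and the script prints 2, i.e. the
# number of characters strictly between the first occurrence and the repeat.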
|
[
"[email protected]"
] | |
0ba65146cf6659db44dd47b906d4f6f8ea99fa48
|
6e800b3513537622df14bb598abe9c051116106c
|
/51-100/088MergeEasy.py
|
14ead1c1a42033d242f68373f994af50d34f3ddf
|
[] |
no_license
|
Huxhh/LeetCodePy
|
fd72f03193d1f0b58c44bffc46a9a59ba9714215
|
6a99e84c5742ca68012b14da362f6c3255e10b21
|
refs/heads/master
| 2023-06-09T09:23:54.209025 | 2023-05-31T16:29:03 | 2023-05-31T16:29:03 | 148,866,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,084 |
py
|
# coding=utf-8
def merge(nums1, m, nums2, n):
index = m + n - 1
x = m - 1
y = n - 1
while True:
if x < 0 and y < 0:
break
if x >= 0 and y >= 0:
if nums1[x] >= nums2[y]:
nums1[index] = nums1[x]
x -= 1
else:
nums1[index] = nums2[y]
y -= 1
index -= 1
elif x >= 0 > y:
nums1[index] = nums1[x]
x -= 1
index -= 1
elif x < 0 <= y:
nums1[index] = nums2[y]
y -= 1
index -= 1
return nums1
def merge2(nums1, m, nums2, n):
index = m + n - 1
while m > 0 and n > 0:
if nums1[m-1] > nums2[n-1]:
nums1[index] = nums1[m-1]
m -= 1
else:
nums1[index] = nums2[n-1]
n -= 1
index -= 1
if n > 0:
nums1[:n] = nums2[:n]
return nums1
if __name__ == '__main__':
nums1 = [1, 2, 3, 0, 0, 0]
m = 3
nums2 = [2, 5, 6]
n = 3
print(merge2(nums1, m, nums2, n))
|
[
"[email protected]"
] | |
c6f80b6fcff9d820146a6c798e6abee898629364
|
4fb5eb0a9a24fa5c112a4ebc854ee2604b04adda
|
/python/oanda/models/calculated_trade_state.py
|
996c9bbd4d0e98b92e6550ccd12dbbf12aedeb73
|
[
"MIT"
] |
permissive
|
KoenBal/OANDA_V20_Client
|
ed4c182076db62ecf7a216c3e3246ae682300e94
|
e67b9dbaddff6ed23e355d3ce7f9c9972799c702
|
refs/heads/master
| 2020-03-27T20:42:25.777471 | 2019-12-02T15:44:06 | 2019-12-02T15:44:06 | 147,088,130 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,967 |
py
|
# coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CalculatedTradeState(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'unrealized_pl': 'str',
'margin_used': 'str'
}
attribute_map = {
'id': 'id',
'unrealized_pl': 'unrealizedPL',
'margin_used': 'marginUsed'
}
def __init__(self, id=None, unrealized_pl=None, margin_used=None): # noqa: E501
"""CalculatedTradeState - a model defined in Swagger""" # noqa: E501
self._id = None
self._unrealized_pl = None
self._margin_used = None
self.discriminator = None
if id is not None:
self.id = id
if unrealized_pl is not None:
self.unrealized_pl = unrealized_pl
if margin_used is not None:
self.margin_used = margin_used
@property
def id(self):
"""Gets the id of this CalculatedTradeState. # noqa: E501
The Trade's ID. # noqa: E501
:return: The id of this CalculatedTradeState. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CalculatedTradeState.
The Trade's ID. # noqa: E501
:param id: The id of this CalculatedTradeState. # noqa: E501
:type: str
"""
self._id = id
@property
def unrealized_pl(self):
"""Gets the unrealized_pl of this CalculatedTradeState. # noqa: E501
The Trade's unrealized profit/loss. # noqa: E501
:return: The unrealized_pl of this CalculatedTradeState. # noqa: E501
:rtype: str
"""
return self._unrealized_pl
@unrealized_pl.setter
def unrealized_pl(self, unrealized_pl):
"""Sets the unrealized_pl of this CalculatedTradeState.
The Trade's unrealized profit/loss. # noqa: E501
:param unrealized_pl: The unrealized_pl of this CalculatedTradeState. # noqa: E501
:type: str
"""
self._unrealized_pl = unrealized_pl
@property
def margin_used(self):
"""Gets the margin_used of this CalculatedTradeState. # noqa: E501
Margin currently used by the Trade. # noqa: E501
:return: The margin_used of this CalculatedTradeState. # noqa: E501
:rtype: str
"""
return self._margin_used
@margin_used.setter
def margin_used(self, margin_used):
"""Sets the margin_used of this CalculatedTradeState.
Margin currently used by the Trade. # noqa: E501
:param margin_used: The margin_used of this CalculatedTradeState. # noqa: E501
:type: str
"""
self._margin_used = margin_used
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CalculatedTradeState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
6f227c5ce3a2d188622f589658f80e019ffc2ec6
|
7e4460c85790fae2d470182732289bcd1b8777b2
|
/Process/process_scripts.py
|
c6028329d884b48f3b57d3958e624f02c6d43b3a
|
[] |
no_license
|
khamukkamu/swconquest-msys
|
5b23654c8dd2e8b2f25bc7914252eedc05a5cc1e
|
71337a4ae9c507b9440e84cf49d31fc67a781978
|
refs/heads/master
| 2021-04-29T19:00:10.389224 | 2019-05-01T15:11:11 | 2019-05-01T15:11:11 | 121,704,753 | 1 | 1 | null | 2018-02-16T01:40:58 | 2018-02-16T01:40:58 | null |
UTF-8
|
Python
| false | false | 1,812 |
py
|
import string
from module_info import *
from module_scripts import *
from process_common import *
from process_operations import *
from module_info import wb_compile_switch as is_wb
def save_scripts(variable_list,variable_uses,scripts,tag_uses,quick_strings):
file = open(export_dir + "scripts.txt","w")
file.write("scriptsfile version 1\n")
file.write("%d\n"%len(scripts))
temp_list = []
list_type = type(temp_list)
for i_script in xrange(len(scripts)):
func = scripts[i_script]
if (type(func[1]) == list_type):
file.write("%s -1\n"%(convert_to_identifier(func[0])))
save_statement_block(file,convert_to_identifier(func[0]), 0,func[1], variable_list,variable_uses,tag_uses,quick_strings, convert_to_identifier(func[0]) )
else:
file.write("%s %s\n"%(convert_to_identifier(func[0]), swytrailzro(func[1])))
save_statement_block(file,convert_to_identifier(func[0]), 0,func[2], variable_list,variable_uses,tag_uses,quick_strings, convert_to_identifier(func[0]) )
file.write("\n")
file.close()
def save_python_header():
if (is_wb):
file = open("./IDs/ID_scripts_wb.py","w")
else:
file = open("./IDs/ID_scripts_mb.py","w")
for i_script in xrange(len(scripts)):
file.write("script_%s = %d\n"%(convert_to_identifier(scripts[i_script][0]),i_script))
file.write("\n\n")
file.close()
print "Exporting scripts..."
save_python_header()
variable_uses = []
variables = load_variables(export_dir, variable_uses)
tag_uses = load_tag_uses(export_dir)
quick_strings = load_quick_strings(export_dir)
save_scripts(variables,variable_uses,scripts,tag_uses,quick_strings)
save_variables(export_dir,variables,variable_uses)
save_tag_uses(export_dir, tag_uses)
save_quick_strings(export_dir,quick_strings)
|
[
"[email protected]"
] | |
9c06c0a29a8845ed289678b35982f9e2dbc2a720
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03957/s736073526.py
|
4d07ff9077817797927326fee3fdb9b2cb662fdf
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 167 |
py
|
s=input()
ans="No"
for i in range(len(s)):
if s[i]=="C":
for j in range(i+1,len(s)):
if s[j]=="F":
print("Yes")
exit()
print(ans)
|
[
"[email protected]"
] | |
bafd53e16b68d5c5315f2de4dc3a24be45844475
|
ae9bb7babce2a0349ae932985cf418a03057c670
|
/test_ProjectPractice/test_requests/wework/__init__.py
|
3c6ed16fe565082753b7192859a99a55e588806c
|
[] |
no_license
|
Veraun/HogwartsSDET17-1
|
d2592fcb4c9c63724c19bcf9edde349ebcd2c8af
|
6648dbfb640b065ff2c76cb6889a8f9e4f124b91
|
refs/heads/main
| 2023-07-02T05:20:32.161248 | 2021-08-06T03:55:13 | 2021-08-06T03:55:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 157 |
py
|
'''
#!/usr/bin/python3
# -*- coding: utf-8 -*-
@author: wangwei
@project: HogwartsSDET17
@file: __init__.py.py
@time: 2021/3/28 19:30
@Email: Warron.Wang
'''
|
[
"[email protected]"
] | |
13fe7acffc167ef651043847166ade55dfbe7fad
|
6219e6536774e8eeb4cadc4a84f6f2bea376c1b0
|
/scraper/storage_spiders/thienthientanvn.py
|
80cce4c40f006fcc7e32a7cdf2085a62934bb55f
|
[
"MIT"
] |
permissive
|
nguyenminhthai/choinho
|
109d354b410b92784a9737f020894d073bea1534
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
refs/heads/master
| 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 962 |
py
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='mytextarea']/span",
'price' : "//span/div[@class='price']",
'category' : "//div[@id='accordion']/ul/li",
'description' : "//div[@class='table_center']/div[2]/table/tbody/tr/td|//div[@class='table_center']/div[3]/table/tbody/tr/td",
'images' : "//img[@id='ctl00_MainPlaceHolder_ctl00_imgLaptop']/@src|//ul/li/a[@class='highslide']/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'thienthientan.vn'
allowed_domains = ['thienthientan.vn']
start_urls = ['http://www.thienthientan.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(), 'parse_item'),
Rule(LinkExtractor(), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
[
"[email protected]"
] | |
5ae2c762306e5a94e26a671d98093c5b02e5db3d
|
f167dffa2f767a0419aa82bf434852069a8baeb8
|
/lib/youtube_dl/extractor/arcpublishing.py
|
ca6a6c4d87f9f13a259f8402f5e7ef51ad097088
|
[
"MIT"
] |
permissive
|
firsttris/plugin.video.sendtokodi
|
d634490b55149adfdcb62c1af1eb77568b8da3f5
|
1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3
|
refs/heads/master
| 2023-08-18T10:10:39.544848 | 2023-08-15T17:06:44 | 2023-08-15T17:06:44 | 84,665,460 | 111 | 31 |
MIT
| 2022-11-11T08:05:21 | 2017-03-11T16:53:06 |
Python
|
UTF-8
|
Python
| false | false | 7,970 |
py
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
parse_iso8601,
try_get,
)
class ArcPublishingIE(InfoExtractor):
_UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
_VALID_URL = r'arcpublishing:(?P<org>[a-z]+):(?P<id>%s)' % _UUID_REGEX
_TESTS = [{
# https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/
'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
'only_matching': True,
}, {
# https://www.bostonglobe.com/video/2020/12/30/metro/footage-released-showing-officer-talking-about-striking-protesters-with-car/
'url': 'arcpublishing:bostonglobe:232b7ae6-7d73-432d-bc0a-85dbf0119ab1',
'only_matching': True,
}, {
# https://www.actionnewsjax.com/video/live-stream/
'url': 'arcpublishing:cmg:cfb1cf1b-3ab5-4d1b-86c5-a5515d311f2a',
'only_matching': True,
}, {
# https://elcomercio.pe/videos/deportes/deporte-total-futbol-peruano-seleccion-peruana-la-valorizacion-de-los-peruanos-en-el-exterior-tras-un-2020-atipico-nnav-vr-video-noticia/
'url': 'arcpublishing:elcomercio:27a7e1f8-2ec7-4177-874f-a4feed2885b3',
'only_matching': True,
}, {
# https://www.clickondetroit.com/video/community/2020/05/15/events-surrounding-woodward-dream-cruise-being-canceled/
'url': 'arcpublishing:gmg:c8793fb2-8d44-4242-881e-2db31da2d9fe',
'only_matching': True,
}, {
# https://www.wabi.tv/video/2020/12/30/trenton-company-making-equipment-pfizer-covid-vaccine/
'url': 'arcpublishing:gray:0b0ba30e-032a-4598-8810-901d70e6033e',
'only_matching': True,
}, {
# https://www.lateja.cr/el-mundo/video-china-aprueba-con-condiciones-su-primera/dfcbfa57-527f-45ff-a69b-35fe71054143/video/
'url': 'arcpublishing:gruponacion:dfcbfa57-527f-45ff-a69b-35fe71054143',
'only_matching': True,
}, {
# https://www.fifthdomain.com/video/2018/03/09/is-america-vulnerable-to-a-cyber-attack/
'url': 'arcpublishing:mco:aa0ca6fe-1127-46d4-b32c-be0d6fdb8055',
'only_matching': True,
}, {
# https://www.vl.no/kultur/2020/12/09/en-melding-fra-en-lytter-endret-julelista-til-lewi-bergrud/
'url': 'arcpublishing:mentormedier:47a12084-650b-4011-bfd0-3699b6947b2d',
'only_matching': True,
}, {
# https://www.14news.com/2020/12/30/whiskey-theft-caught-camera-henderson-liquor-store/
'url': 'arcpublishing:raycom:b89f61f8-79fa-4c09-8255-e64237119bf7',
'only_matching': True,
}, {
# https://www.theglobeandmail.com/world/video-ethiopian-woman-who-became-symbol-of-integration-in-italy-killed-on/
'url': 'arcpublishing:tgam:411b34c1-8701-4036-9831-26964711664b',
'only_matching': True,
}, {
# https://www.pilotonline.com/460f2931-8130-4719-8ea1-ffcb2d7cb685-132.html
'url': 'arcpublishing:tronc:460f2931-8130-4719-8ea1-ffcb2d7cb685',
'only_matching': True,
}]
_POWA_DEFAULTS = [
(['cmg', 'prisa'], '%s-config-prod.api.cdn.arcpublishing.com/video'),
([
'adn', 'advancelocal', 'answers', 'bonnier', 'bostonglobe', 'demo',
'gmg', 'gruponacion', 'infobae', 'mco', 'nzme', 'pmn', 'raycom',
'spectator', 'tbt', 'tgam', 'tronc', 'wapo', 'wweek',
], 'video-api-cdn.%s.arcpublishing.com/api'),
]
@staticmethod
def _extract_urls(webpage):
entries = []
# https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage):
powa = extract_attributes(powa_el) or {}
org = powa.get('data-org')
uuid = powa.get('data-uuid')
if org and uuid:
entries.append('arcpublishing:%s:%s' % (org, uuid))
return entries
def _real_extract(self, url):
org, uuid = re.match(self._VALID_URL, url).groups()
for orgs, tmpl in self._POWA_DEFAULTS:
if org in orgs:
base_api_tmpl = tmpl
break
else:
base_api_tmpl = '%s-prod-cdn.video-api.arcpublishing.com/api'
if org == 'wapo':
org = 'washpost'
video = self._download_json(
'https://%s/v1/ansvideos/findByUuid' % (base_api_tmpl % org),
uuid, query={'uuid': uuid})[0]
title = video['headlines']['basic']
is_live = video.get('status') == 'live'
urls = []
formats = []
for s in video.get('streams', []):
s_url = s.get('url')
if not s_url or s_url in urls:
continue
urls.append(s_url)
stream_type = s.get('stream_type')
if stream_type == 'smil':
smil_formats = self._extract_smil_formats(
s_url, uuid, fatal=False)
for f in smil_formats:
if f['url'].endswith('/cfx/st'):
f['app'] = 'cfx/st'
if not f['play_path'].startswith('mp4:'):
f['play_path'] = 'mp4:' + f['play_path']
if isinstance(f['tbr'], float):
f['vbr'] = f['tbr'] * 1000
del f['tbr']
f['format_id'] = 'rtmp-%d' % f['vbr']
formats.extend(smil_formats)
elif stream_type in ('ts', 'hls'):
m3u8_formats = self._extract_m3u8_formats(
s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native',
m3u8_id='hls', fatal=False)
if all([f.get('acodec') == 'none' for f in m3u8_formats]):
continue
for f in m3u8_formats:
if f.get('acodec') == 'none':
f['preference'] = -40
elif f.get('vcodec') == 'none':
f['preference'] = -50
height = f.get('height')
if not height:
continue
vbr = self._search_regex(
r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None)
if vbr:
f['vbr'] = int(vbr)
formats.extend(m3u8_formats)
else:
vbr = int_or_none(s.get('bitrate'))
formats.append({
'format_id': '%s-%d' % (stream_type, vbr) if vbr else stream_type,
'vbr': vbr,
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),
'filesize': int_or_none(s.get('filesize')),
'url': s_url,
'preference': -1,
})
self._sort_formats(
formats, ('preference', 'width', 'height', 'vbr', 'filesize', 'tbr', 'ext', 'format_id'))
subtitles = {}
for subtitle in (try_get(video, lambda x: x['subtitles']['urls'], list) or []):
subtitle_url = subtitle.get('url')
if subtitle_url:
subtitles.setdefault('en', []).append({'url': subtitle_url})
return {
'id': uuid,
'title': self._live_title(title) if is_live else title,
'thumbnail': try_get(video, lambda x: x['promo_image']['url']),
'description': try_get(video, lambda x: x['subheadlines']['basic']),
'formats': formats,
'duration': int_or_none(video.get('duration'), 100),
'timestamp': parse_iso8601(video.get('created_date')),
'subtitles': subtitles,
'is_live': is_live,
}
|
[
"[email protected]"
] | |
b1effa43176d77ba4cd5d71fe491629591f33413
|
978a0ff297cfe68baa8b62a30aaacefa3efdd48d
|
/flaskfiles/flaskpractice.py
|
4fe3f0d5817107d5f5eb70af42986722a0a65e5e
|
[] |
no_license
|
pavi535/pythonpratice
|
d55f263cf4170ace3fa8ba7f4a26d67f950af7ce
|
9f66be3e609f2b4fbc1a035e67d6fcf08992818a
|
refs/heads/main
| 2023-08-27T06:38:30.446752 | 2021-11-10T03:03:24 | 2021-11-10T03:03:24 | 426,094,134 | 0 | 0 | null | 2021-11-10T03:03:25 | 2021-11-09T04:46:11 |
Python
|
UTF-8
|
Python
| false | false | 2,593 |
py
|
from datetime import datetime
from flask import Flask, render_template, url_for, flash, redirect
from flask_sqlalchemy import SQLAlchemy
from forms import RegistrationForm, LoginForm
app = Flask(__name__)
app.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db=SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
password = db.Column(db.String(60), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f"Post('{self.title}', '{self.date_posted}')"
posts=[{'Product_name': 'car',
'car_make':'Toyota',
'car_year': 2019,
'car_description':'It is a black car with 52000 miles on it' },
{'Product_name': 'jeep',
'car_make':'Wrangler',
'car_year': 2020,
'car_description':'It is a black car with 12000 miles on it' }
]
@app.route('/')
def home():
return render_template('home.html', posts=posts)
@app.route('/help')
def help():
return render_template('help.html', title='help')
@app.route("/register", methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
flash(f'Account created for {form.username.data}!', 'success')
return redirect(url_for('home'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
if form.email.data == '[email protected]' and form.password.data == 'password':
flash('You have been logged in!', 'success')
return redirect(url_for('home'))
else:
flash('Login Unsuccessful. Please check username and password', 'danger')
return render_template('login.html', title='Login', form=form)
if __name__=='__main__':
app.run(debug=True)
|
[
"[email protected]"
] | |
8a995bd441b1bf0410c40c17856e88cacb7fdc00
|
840ca6face6cb369104eec228fe7b51630bd10f1
|
/剑指offer/52-两个链表的第一个公共节点.py
|
ca8ed7ed73921386ff21cac0c21a687a57434913
|
[] |
no_license
|
Leofighting/Practice-on-LeetCode
|
56e6245eb03f76ca254e54dc0a0cdd2c71ec3dd0
|
6d7dad991922abe862f19009b261b5146e059955
|
refs/heads/master
| 2021-08-16T04:21:04.699124 | 2020-06-29T22:48:38 | 2020-06-29T22:48:38 | 197,718,371 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 458 |
py
|
# -*- coding:utf-8 -*-
__author__ = "leo"
# Given two linked lists, find the first node they have in common.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def get_intersection_node(self, headA, headB):
node1, node2 = headA, headB
while node1 != node2:
node1 = node1.next if node1 else headB
node2 = node2.next if node2 else headA
return node1
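

# Hedged usage sketch: build two lists that share a tail node and check that the
# shared node is returned. The node values are arbitrary illustration data.
if __name__ == "__main__":
    common = ListNode(8)
    a = ListNode(4)
    a.next = ListNode(1)
    a.next.next = common
    b = ListNode(5)
    b.next = common
    print(Solution().get_intersection_node(a, b).val)  # expected: 8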
|
[
"[email protected]"
] | |
b06e3ace791dfcd120050816b47cf3cea36e3caf
|
056adbbdfb968486ecc330f913f0de6f51deee33
|
/609-find-duplicate-file-in-system/find-duplicate-file-in-system.py
|
369212a29b568e52d671b267faa76bb344d532b9
|
[] |
no_license
|
privateHmmmm/leetcode
|
b84453a1a951cdece2dd629c127da59a4715e078
|
cb303e610949e953b689fbed499f5bb0b79c4aea
|
refs/heads/master
| 2021-05-12T06:21:07.727332 | 2018-01-12T08:54:52 | 2018-01-12T08:54:52 | 117,215,642 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,759 |
py
|
# -*- coding:utf-8 -*-
# Given a list of directory info including directory path, and all the files with contents in this directory, you need to find out all the groups of duplicate files in the file system in terms of their paths.
#
# A group of duplicate files consists of at least two files that have exactly the same content.
#
# A single directory info string in the input list has the following format:
# "root/d1/d2/.../dm f1.txt(f1_content) f2.txt(f2_content) ... fn.txt(fn_content)"
# It means there are n files (f1.txt, f2.txt ... fn.txt with content f1_content, f2_content ... fn_content, respectively) in directory root/d1/d2/.../dm. Note that n >= 1 and m >= 0. If m = 0, it means the directory is just the root directory.
#
# The output is a list of group of duplicate file paths. For each group, it contains all the file paths of the files that have the same content. A file path is a string that has the following format:
# "directory_path/file_name.txt"
#
#
# Example 1:
#
# Input:
# ["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)", "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
# Output:
# [["root/a/2.txt","root/c/d/4.txt","root/4.txt"],["root/a/1.txt","root/c/3.txt"]]
#
#
#
#
# Note:
#
# No order is required for the final output.
# You may assume the directory name, file name and file content only has letters and digits, and the length of file content is in the range of [1,50].
# The number of files given is in the range of [1,20000].
# You may assume no files or directories share the same name in the same directory.
# You may assume each given directory info represents a unique directory. Directory path and file info are separated by a single blank space.
#
#
#
#
# Follow-up beyond contest:
#
# Imagine you are given a real file system, how will you search files? DFS or BFS?
# If the file content is very large (GB level), how will you modify your solution?
# If you can only read the file by 1kb each time, how will you modify your solution?
# What is the time complexity of your modified solution? What is the most time-consuming part and memory consuming part of it? How to optimize?
# How to make sure the duplicated files you find are not false positive?
#
import collections


class Solution(object):
def findDuplicate(self, paths):
"""
:type paths: List[str]
:rtype: List[List[str]]
"""
dicts = collections.defaultdict(list)
for path in paths:
files = path.split(" ")
dir = files[0]
for f in files[1:]:
filename,_,content = f.partition('(')
dicts[content[:-1]].append(dir+'/'+filename)
return [g for g in dicts.values() if len(g)>1]
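

# Hedged usage sketch reproducing the sample input from the problem statement
# above; the order of groups in the printed result may differ.
if __name__ == "__main__":
    sample = ["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)",
              "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
    print(Solution().findDuplicate(sample))
    # e.g. [['root/a/1.txt', 'root/c/3.txt'],
    #       ['root/a/2.txt', 'root/c/d/4.txt', 'root/4.txt']]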
|
[
"[email protected]"
] | |
7863e3a9c1b084f2424bfe6b8e926d7afd714b98
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04030/s580737724.py
|
e5017d598b9bc2f7e817eadffadd9076c0229da3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 308 |
py
|
S = input()
Z = []
X = len(S)
i = 0
while i < X :
if S[i] == "0":
Z.append(0)
elif S[i] == "1":
Z.append(1)
elif S[i] == "B":
if len(Z)== 0:
pass
else:
Z.pop()
i += 1
i = 0
X = len(Z)
while i < X:
print(Z[i] , end ="")
i +=1
|
[
"[email protected]"
] | |
3267827c6172fd22712c30402e7fc68868d81061
|
42b84b02e64d21234372501a20bf820e0bcbf281
|
/site/threath/apps/user_profiles/views.py
|
2054832130d5f203d6bf0ea498dde605276bad9c
|
[] |
no_license
|
gage/proto
|
861d1e1190770b0cc74f51a6fe140157cc0ac12e
|
e13ac7d0ee5c6acce2557dcf71a00a941543c006
|
refs/heads/master
| 2020-04-06T06:44:01.712532 | 2013-06-28T06:30:59 | 2013-06-28T06:30:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,876 |
py
|
import time
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.auth.models import User
from globals.utils import bigpipe_pagelet
def verify_email(request):
'''
    When the user clicks the activation link, the email is verified.
'''
activation_code = request.GET.get('activation_code')
email = request.GET.get('email')
uid = request.GET.get('id')
# print "activation_code: %s" % activation_code
# print "email: %s" % email
# print "id: %s" % uid
user = User.objects.get(id=uid)
# print user
profile = user.get_profile()
if profile.verify_email(email, activation_code):
return HttpResponse("Email has been verified successfully.")
else:
return HttpResponse("This activation code is expired.")
@login_required
def user_main(request, user_id=None):
def stream_response_generator():
context = {
'BIG_PIPE': True
}
base_view = render_to_string("main.html", context, context_instance=RequestContext(request))
yield base_view.ljust(4096)
yield bp_testpagelet(request).ljust(4096)
yield render_to_string("bp_page_end.html", {}, context_instance=RequestContext(request))
return HttpResponse(stream_response_generator(), mimetype='text/html', stream_content=True)
@bigpipe_pagelet
def bp_testpagelet(request):
innerHTML = render_to_string("bp_testpagelet.html", {'BIG_PIPE': True}, context_instance=RequestContext(request))
return ['testpagelet',
innerHTML,
'chatRoom/chatRoom',
['base.css','test.css']
]
|
[
"[email protected]"
] | |
0039e5a8aec878cb771be2ecdc89116f71a7cd5f
|
48ff6b01dabc631c8924f3c51996010d9e0d2086
|
/psypl/experiments/variable_count.py
|
8f9bdaeef8f1030d231ba50b2ed17beb7e2c70bb
|
[] |
no_license
|
willcrichton/psypl-experiments
|
b4522908f17ba9fbc023fa627a260e645a511bc4
|
7b0a134cc17919e62707d005fc03f2e22938eb13
|
refs/heads/master
| 2022-12-18T01:41:20.964024 | 2021-02-13T00:46:55 | 2021-02-13T00:46:55 | 219,410,440 | 3 | 0 | null | 2022-12-07T14:42:13 | 2019-11-04T03:33:04 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,326 |
py
|
from enum import Enum
from ..utils import random_tree, ConstNode, OpNode, all_names, shuffle, try_int
from random import sample
from itertools import combinations, product
from ..base import Experiment
from pprint import pprint
import pandas as pd
import numpy as np
import experiment_widgets
class VariableCountExperiment(Experiment):
Widget = experiment_widgets.FunctionBasicExperiment
all_n_op = [6, 9]
all_n_var = [0, 2, 4]
class Condition(Enum):
#Random = 1
Even = 1
Frontloaded = 2
def generate_experiment(self, N_trials=24):
conditions = list(product(self.all_n_var, self.all_n_op, list(self.Condition)))
return {
"trials": shuffle(
[
self.generate_trial(*conds)
for conds in conditions
for _ in range(N_trials // len(conditions))
]
),
"between_trials_time": 4000,
}
def node_size(self, t, idxs):
if isinstance(t, OpNode):
lmap, lsize = self.node_size(t.left, idxs)
rmap, rsize = self.node_size(t.right, idxs)
size = lsize + rsize + 1
if t.index in idxs:
return {t.index: size, **lmap, **rmap}, 0
else:
return {**lmap, **rmap}, size
else:
return {}, 0
def generate_trial(self, N_var, N_op, cond):
tree = random_tree(N_op)
if N_var > 0:
coverings = pd.DataFrame([{
'sizes': self.node_size(tree, idxs)[0],
'remaining': self.node_size(tree, idxs)[1],
'idxs': idxs
} for idxs in combinations(list(range(N_op-1)), N_var)])
coverings['size_seq'] = coverings.apply(
lambda row: [t[1] for t in sorted(row.sizes.items(), key=lambda t: t[0])] + [row.remaining],
axis=1)
if cond == self.Condition.Even:
                coverings['score'] = coverings.size_seq.map(lambda seq: np.std(seq))
elif cond == self.Condition.Frontloaded:
def compute_score(seq):
return np.sum([(i+1) * seq[i] for i in range(len(seq))])
coverings['score'] = coverings.size_seq.map(compute_score)
best_rows = coverings[coverings.score == coverings.score.min()]
row = best_rows.sample().iloc[0]
indices = row.idxs
size_seq = row.size_seq
names = sample(all_names, k=N_var)
defs, call = tree.to_mixed_str({i: n for i, n in zip(indices, names)})
else:
defs = []
call = tree.to_paren_str()
size_seq = [N_op]
program = '\n'.join(defs + [call])
globls = {}
exec(program, globls, globls)
answer = eval(call, globls, globls)
return {
'program': program,
'call': call if N_var > 0 else None,
'cond': str(cond),
'N_var': N_var,
'N_op': N_op,
'size_seq': size_seq,
'answer': str(answer)
}
def eval_trial(self, trial, result):
return {
"correct": 1 if int(trial["answer"]) == try_int(result["response"]) else 0,
"cond": trial["cond"]
}
|
[
"[email protected]"
] | |
ab5a8efe6ee474ebb3d0874bd150540fd5990e8f
|
b05ae08859d3b593b6c815a10e0705e13c1ae1eb
|
/RinoNakasone/RinoNakasone/spiders/piaohua.py
|
46b6392f62d63419047047495d160ab00d756622
|
[] |
no_license
|
jacksonyoudi/Rino_nakasone_backend
|
32425bcd9087384fa25db1fe51e854b7a4f1fa12
|
e838668a6f67a6a4eca52d7658ad84b61b4123db
|
refs/heads/master
| 2021-04-15T18:21:17.678794 | 2019-03-02T15:16:30 | 2019-03-02T15:16:30 | 126,698,903 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,131 |
py
|
# -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from RinoNakasone.settings import PIAOHUA, CDN
# NOTE: CDN (above) and IreadweekItem (below) are used in parse_detail but their
# locations are assumptions; adjust these imports if the project defines them elsewhere.
from RinoNakasone.items import IreadweekItem
class PiaohuaSpider(scrapy.Spider):
name = 'piaohua'
allowed_domains = ['www.piaohua.com']
start_urls = ['http://www.piaohua.com/']
def parse(self, response):
html_doc = response.body
soup = BeautifulSoup(html_doc, 'html.parser')
for i in soup.find_all('a', class_="img"):
if i.attrs.get('href'):
url = i.attrs.get('href')
full_url = urljoin(PIAOHUA, url)
yield scrapy.Request(full_url, callback=self.parse_detail)
next_url = urljoin(response.url.split('list_')[0],
soup.find('div', class_='page tk').find_all('a')[-2].attrs.get('href'))
yield scrapy.Request(next_url, callback=self.parse)
def parse_detail(self, response):
item = IreadweekItem()
html_doc = response.body
soup = BeautifulSoup(html_doc, 'html.parser')
img_url = urljoin(CDN, soup.find('img').attrs.get('src').replace('//', '/'))
download_url = soup.find('a', class_='downloads').attrs.get('href')
title = soup.find_all('div', class_='hanghang-za-title')
name = title[0].text
content = soup.find_all('div', class_='hanghang-za-content')
author_info = content[0].text
directory = '\n'.join([i.text.replace("\u3000", '') for i in content[1].find_all('p')])
info = soup.find('div', class_='hanghang-shu-content-font').find_all('p')
author = info[0].text.split('作者:')[1]
category = info[1].text.split('分类:')[1]
score = info[2].text.split('豆瓣评分:')[1]
introduction = info[4].text
item['name'] = name
item['img_url'] = img_url
item['download_url'] = download_url
item['author'] = author
item['author_info'] = author_info
item['category'] = category
item['score'] = score
item['introduction'] = introduction
item['directory'] = directory
return item
|
[
"[email protected]"
] | |
9565c6008d359c9ef4776815146440ba81e91136
|
a4f2d74559b00191454d7d3492f8d35d118332b5
|
/src/atra/plot/network_air.py
|
15d33b561b56963e9c5b77d2ee76eb5a2084872d
|
[
"MIT"
] |
permissive
|
nfontan/argentina-transport
|
c4b6f06a33034ce1c3ce905f901ff5086013b38b
|
f1583b077844e6b20b2c81144dec0872c88bdb80
|
refs/heads/master
| 2023-03-18T10:23:44.580084 | 2019-08-11T22:01:34 | 2019-08-11T22:01:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,910 |
py
|
"""Plot air network
"""
import os
import cartopy.crs as ccrs
import geopandas
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from atra.utils import load_config, get_axes, plot_basemap, scale_bar, plot_basemap_labels, save_fig
def main(config):
"""Read shapes, plot map
"""
data_path = config['paths']['data']
# data
output_file = os.path.join(config['paths']['figures'], 'network-air-map.png')
air_edge_file = os.path.join(data_path, 'network', 'air_edges.shp')
air_node_file = os.path.join(data_path, 'network', 'air_nodes.shp')
# air_usage_file = os.path.join(data_path, 'usage', 'air_passenger.csv')
# basemap
proj_lat_lon = ccrs.PlateCarree()
ax = get_axes()
plot_basemap(ax, data_path)
scale_bar(ax, location=(0.8, 0.05))
plot_basemap_labels(ax, data_path, include_regions=False)
colors = {
'Air route': '#252525',
'Airport': '#d95f0e'
}
# edges
edges = geopandas.read_file(air_edge_file)
ax.add_geometries(
list(edges.geometry),
crs=proj_lat_lon,
linewidth=1.5,
edgecolor=colors['Air route'],
facecolor='none',
zorder=4
)
# edges merged with usage
# usage = pandas.read_csv(air_usage_file)
# edges_with_usage = edges.merge(usage[['id', 'passengers_2016']], on='id')
# nodes
nodes = geopandas.read_file(air_node_file)
ax.scatter(
list(nodes.geometry.x),
list(nodes.geometry.y),
transform=proj_lat_lon,
facecolor=colors['Airport'],
s=12,
zorder=5
)
# legend
legend_handles = [
mpatches.Patch(color=color, label=label)
for label, color in colors.items()
]
plt.legend(handles=legend_handles, loc='lower left')
# save
save_fig(output_file)
if __name__ == '__main__':
CONFIG = load_config()
main(CONFIG)
|
[
"[email protected]"
] | |
2264e15313f69e818f1bbdd697aae79e592592ad
|
4273f162abb12ef1939271c2aabee9547ac6afee
|
/crowd/utils/config.py
|
054ef5faf1c8d84308890a15230a3b194adf10e5
|
[] |
no_license
|
xiyuhao/subins_tutorials
|
2717c47aac0adde099432e5dfd231606bf45a266
|
acbe4fe16483397e9b0f8e240ca23bdca652b92d
|
refs/heads/master
| 2023-07-28T13:42:41.445399 | 2021-09-12T11:02:37 | 2021-09-12T11:02:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 474 |
py
|
'''
config.py 0.0.1
Date: January 15, 2019
Last modified: June 14, 2019
Author: Subin. Gopi([email protected])
# Copyright(c) 2019, Subin Gopi
# All rights reserved.
# WARNING! All changes made in this file will be lost!
Description
None.
'''
def get_config():
return 'Linux', 'maya', '2016', '2.7.5'
def get_tool_kit():
tools = {
'create': ['create', 'Create', '0.0.1'],
'publish': ['publish', 'Publish', '0.0.1']
}
return tools
|
[
"[email protected]"
] | |
6ef4885b55b2959e9db0e836280c30f7bf832629
|
a2860dd0acbb7b85d30fad1be52512fa7bc4c611
|
/cerebralcortex/core/file_manager/read_handler.py
|
f1d9eb4c750b94afc422959e5232bc2448e3825c
|
[
"BSD-2-Clause"
] |
permissive
|
hippietilley/CerebralCortex-Kernel
|
b1783c8156744f7809c9a3810b990c45945da936
|
c7dac033d9561f14bdb72430577db6ae4e3c7911
|
refs/heads/master
| 2020-04-18T15:15:47.199601 | 2019-01-18T16:05:14 | 2019-01-18T16:05:14 | 167,607,878 | 0 | 0 |
BSD-2-Clause
| 2019-01-25T20:16:54 | 2019-01-25T20:16:54 | null |
UTF-8
|
Python
| false | false | 6,069 |
py
|
# Copyright (c) 2018, MD2K Center of Excellence
# - Nasir Ali <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import gzip
import json
import traceback
from typing import List
from pympler import asizeof
from cerebralcortex.core.datatypes.datastream import DataStream, DataPoint
from cerebralcortex.core.datatypes.stream_types import StreamTypes
class ReadHandler():
def read_file(self, filepath: str) -> str:
"""
Read a file and return contents
:param filepath:
:return: file contents
:rtype: str
"""
if not filepath:
raise ValueError("File path is required field.")
with open(filepath, "r") as file:
data = file.read()
file.close()
return data
def file_processor(self, msg: dict, zip_filepath: str) -> DataStream:
"""
Process a Kafka or MySQL msg. Parse compressed files. Convert json metadata and data in DataStream object.
:param msg:
:param zip_filepath:
:return: DataStream object with metadata and data
:rtype: DataStream
"""
if not isinstance(msg["metadata"], dict):
metadata_header = json.loads(msg["metadata"])
else:
metadata_header = msg["metadata"]
identifier = metadata_header["identifier"]
owner = metadata_header["owner"]
name = metadata_header["name"]
data_descriptor = metadata_header["data_descriptor"]
execution_context = metadata_header["execution_context"]
if "annotations" in metadata_header:
annotations = metadata_header["annotations"]
else:
annotations = {}
if "stream_type" in metadata_header:
stream_type = metadata_header["stream_type"]
else:
stream_type = StreamTypes.DATASTREAM
try:
gzip_file_content = self.get_gzip_file_contents(zip_filepath + msg["filename"])
datapoints = list(map(lambda x: self.row_to_datapoint(x), gzip_file_content.splitlines()))
# self.rename_file(zip_filepath + msg["filename"])
start_time = datapoints[0].start_time
end_time = datapoints[len(datapoints) - 1].end_time
ds = DataStream(identifier,
owner,
name,
data_descriptor,
execution_context,
annotations,
stream_type,
start_time,
end_time,
datapoints)
return ds
except Exception as e:
self.logging.log(error_message="In Kafka preprocessor - Error in processing file: " + str(
msg["filename"]) + " Owner-ID: " + owner + "Stream Name: " + name + " - " + str(traceback.format_exc()),
error_type=self.logtypes.CRITICAL)
return DataStream
def row_to_datapoint(self, row: str) -> DataPoint:
"""
Format data based on mCerebrum's current GZ-CSV format into what Cerebral
Cortex expects
:param row:
:return: single DataPoint
:rtype: DataPoint
"""
ts, offset, values = row.split(',', 2)
ts = int(ts) / 1000.0
offset = int(offset)
timezone = datetime.timezone(datetime.timedelta(milliseconds=offset))
ts = datetime.datetime.fromtimestamp(ts, timezone)
return DataPoint(start_time=ts, sample=values)
def get_gzip_file_contents(self, filepath: str) -> str:
"""
Read and return gzip compressed file contents
:param filepath:
:return: gzip_file_content
:rtype: str
"""
fp = gzip.open(filepath)
gzip_file_content = fp.read()
fp.close()
gzip_file_content = gzip_file_content.decode('utf-8')
return gzip_file_content
def get_chunk_size(self, data: List[DataPoint]) -> int:
"""
get chunk size of DataPoint objects in 0.75 MB blocks. This method is computationally heavy and not scalable.
:param data:
:return: size of a list
:rtype: int
"""
if len(data) > 0:
chunk_size = 750000 / (asizeof.asizeof(data) / len(data)) # 0.75MB chunk size without metadata
return round(chunk_size)
else:
return 0
    @staticmethod
    def chunks(data: str, max_len: int) -> str:
"""
Yields max_len sized chunks with the remainder in the last
:param data:
:param max_len:
"""
# TODO: default yield value needs to be set
for i in range(0, len(data), max_len):
yield data[i:i + max_len]
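

# Hedged usage sketch (assumes the cerebralcortex package imported above is
# installed): row_to_datapoint() parses one mCerebrum GZ-CSV row of the form
# "timestamp_ms,offset_ms,values".
if __name__ == "__main__":
    dp = ReadHandler().row_to_datapoint("1514764800000,21600000,0.57,0.12")
    print(dp.start_time, dp.sample)  # timezone-aware datetime and "0.57,0.12"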
|
[
"[email protected]"
] | |
996925f5530dd5af83fba2da9dd72a0012fcef11
|
a0f27e45f598a5c4145efa44ae05edf431b7e06f
|
/seqmod/modules/ff.py
|
4d6a8e6043e089cd4a0ed05da4145de8fd6bf3c5
|
[] |
no_license
|
cmry/seqmod
|
af4d2e6227247f5d3630a53818328cea493672f4
|
ddc57cd36c6b6204263db770f4c98923ffb4ba0b
|
refs/heads/master
| 2021-09-11T23:50:01.261133 | 2018-01-09T15:51:23 | 2018-01-09T15:51:23 | 113,448,571 | 0 | 0 | null | 2017-12-07T12:31:43 | 2017-12-07T12:31:43 | null |
UTF-8
|
Python
| false | false | 5,677 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
class MLP(nn.Module):
"""
Standard MLP
"""
def __init__(self, inp_size, hid_size, nb_classes,
nb_layers=1, dropout=0.0, act='relu'):
self.inp_size, self.hid_size = inp_size, hid_size
self.nb_layers, self.nb_classes = nb_layers, nb_classes
self.dropout, self.act = dropout, act
super(MLP, self).__init__()
layers = []
for i in range(nb_layers):
layers.append(nn.Linear(inp_size, hid_size))
inp_size = hid_size
self.layers = nn.ModuleList(layers)
self.output = nn.Linear(hid_size, nb_classes)
def forward(self, inp):
"""
:param inp: torch.FloatTensor (batch_size x inp_size)
:return: torch.FloatTensor (batch_size x nb_classes)
"""
# hidden layers
for layer in self.layers:
out = layer(inp)
if self.act is not None:
out = getattr(F, self.act)(out)
if self.dropout > 0:
out = F.dropout(out, p=self.dropout, training=self.training)
inp = out
# output projection
out = self.output(out)
return out
class MaxOut(nn.Module):
def __init__(self, in_dim, out_dim, k):
"""
Implementation of MaxOut:
h_i^{maxout} = max_{j \in [1, ..., k]} x^T W_{..., i, j} + b_{i, j}
where W is in R^{D x M x K}, D is the input size, M is the output size
and K is the number of pieces to max-pool from. (i.e. i ranges over M,
j ranges over K and ... corresponds to the input dimension)
Parameters:
-----------
in_dim: int, Input dimension
out_dim: int, Output dimension
k: int, number of "pools" to max over
Returns:
--------
        out: torch.Tensor (batch x out_dim)
"""
self.in_dim, self.out_dim, self.k = in_dim, out_dim, k
super(MaxOut, self).__init__()
self.projection = nn.Linear(in_dim, k * out_dim)
def forward(self, inp):
"""
Because of the linear projection we are bound to 1-d input
(excluding batch-dim), therefore there is no need to generalize
the implementation to n-dimensional input.
"""
batch, in_dim = inp.size()
# (batch x self.k * self.out_dim) -> (batch x self.out_dim x self.k)
out = self.projection(inp).view(batch, self.out_dim, self.k)
out, _ = out.max(2)
return out
class Highway(torch.nn.Module):
"""
Reference:
https://github.com/allenai/allennlp/blob/master/allennlp/modules/highway.py
A `Highway layer <https://arxiv.org/abs/1505.00387>`_ does a gated
combination of a linear transformation and a non-linear transformation
of its input. y = g * x + (1 - g) * f(A(x)), where A
is a linear transformation, `f` is an element-wise non-linearity,
and `g` is an element-wise gate, computed as sigmoid(B(x)).
Parameters
----------
input_dim: int, The dimensionality of `x`.
num_layers: int, optional, The number of highway layers.
activation: str or class, if string it should be an activation function
from torch.nn, otherwise it should be a class that will be instantiated
with kwargs for each layer.
dropout: float, dropout rate before the nonlinearity
"""
def __init__(self, input_dim, num_layers=1, activation='ReLU', dropout=0.0,
**kwargs):
self.input_dim = input_dim
self.dropout = dropout
super(Highway, self).__init__()
layers = []
for layer in range(num_layers):
if isinstance(activation, type): # custom activation class
nonlinear = activation(**kwargs)
else: # assume string
nonlinear = getattr(nn, activation)()
linear = nn.Linear(input_dim, input_dim * 2)
# We should bias the highway layer to just carry its input forward.
# We do that by setting the bias on B(x) to be positive, because
            # that means `g` will be biased to be high, so we will carry the
# input forward. The bias on `B(x)` is the second half of the bias
# vector in each Linear layer.
linear.bias[input_dim:].data.fill_(1)
linear.bias.custom = True
layers.append(linear)
layers.append(nonlinear)
self.layers = torch.nn.ModuleList(layers)
def forward(self, inputs):
current_input = inputs
for i in range(0, len(self.layers), 2):
layer, activation = self.layers[i], self.layers[i+1]
proj, linear = layer(current_input), current_input
proj = F.dropout(proj, p=self.dropout, training=self.training)
nonlinear = activation(proj[:, 0:self.input_dim])
gate = F.sigmoid(proj[:, self.input_dim:(2 * self.input_dim)])
# apply gate
current_input = gate * linear + (1 - gate) * nonlinear
return current_input
# gracefully taken from:
# https://discuss.pytorch.org/t/solved-reverse-gradients-in-backward-pass/3589/4
class GradReverse(Function):
"Implementation of GRL from DANN (Domain Adaptation Neural Network) paper"
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
"""
GRL must be placed between the feature extractor and the domain classifier
"""
return GradReverse.apply(x)
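

# Hedged smoke test; the sizes are illustrative assumptions only. It checks that
# the blocks above run and produce the expected output shapes.
if __name__ == "__main__":
    x = torch.randn(4, 16)
    print(MLP(16, 32, nb_classes=3)(x).size())                 # torch.Size([4, 3])
    print(MaxOut(16, 8, k=3)(x).size())                        # torch.Size([4, 8])
    print(Highway(16, num_layers=2, dropout=0.1)(x).size())    # torch.Size([4, 16])
    print(grad_reverse(x).size())                              # torch.Size([4, 16])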
|
[
"[email protected]"
] | |
fd75ee444727d1fd69c72d0457b9ea145dcba2b1
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week01/homework02/maoyanspiders/maoyanspiders/pipelines_20200627225401.py
|
2181904c869c47b47b88084f2f73feb9b48ff6f0
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null |
UTF-8
|
Python
| false | false | 523 |
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class MaoyanspidersPipeline(object):
def process_item(self, item, spider):
films_name = item['films_name']
films_type = item['films_type']
release_time = item['release_time']
output = f'|{films_name}|\t|{films_type}|\t|{release_time}|\n\n'
        with open('./week01/homework02/top10.scv', 'a', encoding='utf-8') as f:
            f.write(output)
        return item
|
[
"[email protected]"
] | |
8536e0cce05e6fee39144b2b1c6e1b5c482b510f
|
8064bbf3dadc70c3aceeecd885bc69cfddf06549
|
/ZeeAnalyzer/test/runElectronPlots_Skim_v1.py
|
9dc1a84ee437d62c8d259a0380e2392dbbaec102
|
[] |
no_license
|
taroni/ZeeAnalyzer
|
6faf7e4d9785ab9b15559d096a2b98d5e7483be7
|
44046f7095a22a9b5486a5ab0aee2dee52b430ae
|
refs/heads/master
| 2022-01-17T20:12:06.695267 | 2017-11-24T13:51:25 | 2017-11-24T13:51:25 | 110,087,363 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,560 |
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TestElectrons")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.GlobalTag = GlobalTag(process.GlobalTag, '92X_upgrade2017_realistic_Candidate_forECALStudies', '')
# input
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
inputFilesData = cms.untracked.vstring(
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/030/00000/E69F63AA-EE8E-E711-8121-02163E019BAF.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/008329E5-368F-E711-A1CD-02163E01A21D.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/04BB9D82-398F-E711-B74B-02163E019BDF.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/407638D4-4B8F-E711-AC24-02163E01437E.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/44B91A0E-488F-E711-A372-02163E019CA5.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/5479D9DF-3C8F-E711-BCF4-02163E01A5EB.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/6496C386-518F-E711-B09E-02163E01341D.root'
)
inputFilesMC = cms.untracked.vstring(
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/00CDB4C7-5C93-E711-AF33-02163E0142CA.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/027E1441-3994-E711-BFBD-02163E01A6D8.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/02FD6F07-5D93-E711-85AC-02163E01A334.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/061B6C49-5793-E711-AF23-02163E011B7C.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/0A66322F-5793-E711-9184-02163E01A2BD.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/0EFBF8C4-5C93-E711-94C9-02163E012207.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/14FDD26B-7493-E711-8B21-001E67792532.root'
)
inputFiles = inputFilesMC
outputFile = "electron_ntuple.root"
process.source = cms.Source ("PoolSource", fileNames = inputFiles )
process.ntupler = cms.EDAnalyzer(
'ElectronPlots',
beamSpot = cms.InputTag('offlineBeamSpot'),
genEventInfoProduct = cms.InputTag('generator'),
electrons = cms.InputTag("gedGsfElectrons"),
genParticles = cms.InputTag("genParticles"),
vertices = cms.InputTag("offlinePrimaryVertices"),
conversions = cms.InputTag('allConversions'),
isMC = cms.bool(True)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string( outputFile )
)
process.load("DPGAnalysis/Skims/ZElectronSkim_cff")
process.p = cms.Path(process.zdiElectronSequence*process.ntupler)
|
[
"[email protected]"
] | |
e4ede140050fb8c241173693253719a2d0235799
|
799c9d7e1436232a02b213178ed0bda9d5c673e8
|
/Chapter15/example2.py
|
b3c8ae550f592b84515a5257e78fd403bf0171f4
|
[
"MIT"
] |
permissive
|
KrisNguyen135/Advanced-Python-Programming-Second-Edition
|
a32578116805285983df8eac2dba584e0e77ea0d
|
e5d473e3efc5f6590028cb3f318e1f4aeb0aadd1
|
refs/heads/main
| 2023-08-14T18:14:09.087485 | 2021-09-19T17:57:03 | 2021-09-19T17:57:03 | 373,899,665 | 0 | 0 |
MIT
| 2021-06-04T16:23:55 | 2021-06-04T16:23:55 | null |
UTF-8
|
Python
| false | false | 686 |
py
|
import time
import threading
COUNT = 50000000
def countdown(n):
while n > 0:
n -= 1
###########################################################################
start = time.time()
countdown(COUNT)
print('Sequential program finished.')
print(f'Took {time.time() - start : .2f} seconds.')
###########################################################################
thread1 = threading.Thread(target=countdown, args=(COUNT // 2,))
thread2 = threading.Thread(target=countdown, args=(COUNT // 2,))
start = time.time()
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print('Concurrent program finished.')
print(f'Took {time.time() - start : .2f} seconds.')
|
[
"[email protected]"
] | |
a38e00bd15b7f69cd0501f9e2a9343c1615f935c
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/83f8ec00a3cf40a78f2fd2fa2dedcd3a.py
|
76cb33cce300805428c65c319f8169dd9e0ef049
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 270 |
py
|
import re
def hey(input):
clean_input = input.strip()
if clean_input == '':
return 'Fine. Be that way!'
if clean_input.isupper():
return 'Whoa, chill out!'
if clean_input.endswith('?'):
return 'Sure.'
return 'Whatever.'
|
[
"[email protected]"
] | |
bcee99a9a701fa5486e9c1baba62c7e8182cc60d
|
45de7d905486934629730945619f49281ad19359
|
/xlsxwriter/test/worksheet/test_date_time_01.py
|
50180b311c94a156c9af3597a1e11e5fb953c101
|
[
"BSD-2-Clause"
] |
permissive
|
jmcnamara/XlsxWriter
|
599e1d225d698120ef931a776a9d93a6f60186ed
|
ab13807a1be68652ffc512ae6f5791d113b94ee1
|
refs/heads/main
| 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 |
BSD-2-Clause
| 2023-08-28T18:52:14 | 2013-01-04T01:07:06 |
Python
|
UTF-8
|
Python
| false | false | 6,888 |
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, [email protected]
#
import unittest
from datetime import datetime
from ...worksheet import Worksheet
class TestConvertDateTime(unittest.TestCase):
"""
Test the Worksheet _convert_date_time() method against dates extracted
from Excel.
"""
def setUp(self):
self.worksheet = Worksheet()
def test_convert_date_time(self):
"""Test the _convert_date_time() method."""
# Dates and corresponding numbers from an Excel file.
excel_dates = [
("1899-12-31T00:00:00.000", 0),
("1982-08-25T00:15:20.213", 30188.010650613425),
("2065-04-19T00:16:48.290", 60376.011670023145),
("2147-12-15T00:55:25.446", 90565.038488958337),
("2230-08-10T01:02:46.891", 120753.04359827546),
("2313-04-06T01:04:15.597", 150942.04462496529),
("2395-11-30T01:09:40.889", 181130.04838991899),
("2478-07-25T01:11:32.560", 211318.04968240741),
("2561-03-21T01:30:19.169", 241507.06272186342),
("2643-11-15T01:48:25.580", 271695.07529606484),
("2726-07-12T02:03:31.919", 301884.08578609955),
("2809-03-06T02:11:11.986", 332072.09111094906),
("2891-10-31T02:24:37.095", 362261.10042934027),
("2974-06-26T02:35:07.220", 392449.10772245371),
("3057-02-19T02:45:12.109", 422637.1147234838),
("3139-10-17T03:06:39.990", 452826.12962951389),
("3222-06-11T03:08:08.251", 483014.13065105322),
("3305-02-05T03:19:12.576", 513203.13834),
("3387-10-01T03:29:42.574", 543391.14563164348),
("3470-05-27T03:37:30.813", 573579.15105107636),
("3553-01-21T04:14:38.231", 603768.17683137732),
("3635-09-16T04:16:28.559", 633956.17810832174),
("3718-05-13T04:17:58.222", 664145.17914608796),
("3801-01-06T04:21:41.794", 694333.18173372687),
("3883-09-02T04:56:35.792", 724522.20596981479),
("3966-04-28T05:25:14.885", 754710.2258667245),
("4048-12-21T05:26:05.724", 784898.22645513888),
("4131-08-18T05:46:44.068", 815087.24078782403),
("4214-04-13T05:48:01.141", 845275.24167987274),
("4296-12-07T05:53:52.315", 875464.24574438657),
("4379-08-03T06:14:48.580", 905652.26028449077),
("4462-03-28T06:46:15.738", 935840.28212659725),
("4544-11-22T07:31:20.407", 966029.31343063654),
("4627-07-19T07:58:33.754", 996217.33233511576),
("4710-03-15T08:07:43.130", 1026406.3386936343),
("4792-11-07T08:29:11.091", 1056594.3536005903),
("4875-07-04T09:08:15.328", 1086783.3807329629),
("4958-02-27T09:30:41.781", 1116971.3963169097),
("5040-10-23T09:34:04.462", 1147159.3986627546),
("5123-06-20T09:37:23.945", 1177348.4009715857),
("5206-02-12T09:37:56.655", 1207536.4013501736),
("5288-10-08T09:45:12.230", 1237725.406391551),
("5371-06-04T09:54:14.782", 1267913.412671088),
("5454-01-28T09:54:22.108", 1298101.4127558796),
("5536-09-24T10:01:36.151", 1328290.4177795255),
("5619-05-20T12:09:48.602", 1358478.5068125231),
("5702-01-14T12:34:08.549", 1388667.5237100578),
("5784-09-08T12:56:06.495", 1418855.5389640625),
("5867-05-06T12:58:58.217", 1449044.5409515856),
("5949-12-30T12:59:54.263", 1479232.5416002662),
("6032-08-24T13:34:41.331", 1509420.5657561459),
("6115-04-21T13:58:28.601", 1539609.5822754744),
("6197-12-14T14:02:16.899", 1569797.5849178126),
("6280-08-10T14:36:17.444", 1599986.6085352316),
("6363-04-06T14:37:57.451", 1630174.60969272),
("6445-11-30T14:57:42.757", 1660363.6234115392),
("6528-07-26T15:10:48.307", 1690551.6325035533),
("6611-03-22T15:14:39.890", 1720739.635183912),
("6693-11-15T15:19:47.988", 1750928.6387498612),
("6776-07-11T16:04:24.344", 1781116.6697262037),
("6859-03-07T16:22:23.952", 1811305.6822216667),
("6941-10-31T16:29:55.999", 1841493.6874536921),
("7024-06-26T16:58:20.259", 1871681.7071789235),
("7107-02-21T17:04:02.415", 1901870.7111390624),
("7189-10-16T17:18:29.630", 1932058.7211762732),
("7272-06-11T17:47:21.323", 1962247.7412190163),
("7355-02-05T17:53:29.866", 1992435.7454845603),
("7437-10-02T17:53:41.076", 2022624.7456143056),
("7520-05-28T17:55:06.044", 2052812.7465977315),
("7603-01-21T18:14:49.151", 2083000.7602910995),
("7685-09-16T18:17:45.738", 2113189.7623349307),
("7768-05-12T18:29:59.700", 2143377.7708298611),
("7851-01-07T18:33:21.233", 2173566.773162419),
("7933-09-02T19:14:24.673", 2203754.8016744559),
("8016-04-27T19:17:12.816", 2233942.8036205554),
("8098-12-22T19:23:36.418", 2264131.8080603937),
("8181-08-17T19:46:25.908", 2294319.8239109721),
("8264-04-13T20:07:47.314", 2324508.8387420601),
("8346-12-08T20:31:37.603", 2354696.855296331),
("8429-08-03T20:39:57.770", 2384885.8610853008),
("8512-03-29T20:50:17.067", 2415073.8682530904),
("8594-11-22T21:02:57.827", 2445261.8770581828),
("8677-07-19T21:23:05.519", 2475450.8910360998),
("8760-03-14T21:34:49.572", 2505638.8991848612),
("8842-11-08T21:39:05.944", 2535827.9021521294),
("8925-07-04T21:39:18.426", 2566015.9022965971),
("9008-02-28T21:46:07.769", 2596203.9070343636),
("9090-10-24T21:57:55.662", 2626392.9152275696),
("9173-06-19T22:19:11.732", 2656580.9299968979),
("9256-02-13T22:23:51.376", 2686769.9332335186),
("9338-10-09T22:27:58.771", 2716957.9360968866),
("9421-06-05T22:43:30.392", 2747146.9468795368),
("9504-01-30T22:48:25.834", 2777334.9502990046),
("9586-09-24T22:53:51.727", 2807522.9540709145),
("9669-05-20T23:12:56.536", 2837711.9673210187),
("9752-01-14T23:15:54.109", 2867899.9693762613),
("9834-09-10T23:17:12.632", 2898088.9702850925),
("9999-12-31T23:59:59.000", 2958465.999988426),
]
for excel_date in excel_dates:
date = datetime.strptime(excel_date[0], "%Y-%m-%dT%H:%M:%S.%f")
got = self.worksheet._convert_date_time(date)
exp = excel_date[1]
self.assertEqual(got, exp)
|
[
"[email protected]"
] | |
d0bc437a44318504958582938e6622cdb01b23a9
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2016_01_20_gsh_database_red/integrate_parallel.py
|
19794112e938772715aa9bad100ef9986045d7cc
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,702 |
py
|
import numpy as np
# import itertools as it
import db_functions as fn
import gsh_hex_tri_L0_16 as gsh
import h5py
import time
import sys
tnum = np.int64(sys.argv[1])
filename = 'log_integrate_parallel_%s.txt' % str(tnum)
""" Load Y vec """
f = h5py.File('var_extract_total.hdf5', 'r')
var_set = f.get('var_set')
sinphi = np.sin(var_set[:, 2])
Y = var_set[:, 4]
f.close
""" Initialize important variables """
# these indices are defined for the sampled db inputs
inc = 6 # degree increment for angular variables
sub2rad = inc*np.pi/180.
n_th = 60/inc # number of theta samples for FZ
n_p1 = 360/inc # number of phi1 samples for FZ
n_P = 90/inc # number of Phi samples for FZ
n_p2 = 60/inc # number of phi2 samples for FZ
N_p = 215 # number of GSH bases to evaluate
N_q = 9 # number of cosine bases to evaluate
L_th = np.pi/3.
n_eul = n_p1*n_P*n_p2
n_jobs = 10. # number of jobs submitted to cluster
""" Calculate basis function indices """
cmax = N_p*N_q # total number of permutations of basis functions
fn.WP(str(cmax), filename)
# cmat is the matrix containing all permutations of basis function indices
cmat = np.unravel_index(np.arange(cmax), [N_p, N_q])
cmat = np.array(cmat).T
""" Deal with the parallelization of this operation. specifically pick range
of indxmat to calculate """
n_ii = np.int64(np.ceil(np.float(cmax)/n_jobs)) # number dot products per job
fn.WP(str(n_ii), filename)
ii_stt = tnum*n_ii # start index
if (tnum+1)*n_ii > cmax:
ii_end = cmax
else:
ii_end = (tnum+1)*n_ii # end index
msg = "ii_stt = %s" % ii_stt
fn.WP(msg, filename)
msg = "ii_end = %s" % ii_end
fn.WP(msg, filename)
""" perform the orthogonal regressions """
coeff_prt = np.zeros(ii_end-ii_stt, dtype='complex128')
f = h5py.File('X_parts.hdf5', 'r')
c = 0
indxvec = gsh.gsh_basis_info()
bsz_gsh = ((np.pi**3)/3)/n_eul
bsz_cos = L_th/n_th
for ii in xrange(ii_stt, ii_end):
msg = str(ii)
fn.WP(msg, filename)
st = time.time()
p, q = cmat[ii, :]
basis_p = f.get('p_%s' % p)[...]
basis_q = f.get('q_%s' % q)[...]
ep_set = np.squeeze(basis_p)*basis_q
msg = "load time: %ss" % np.round(time.time()-st, 3)
fn.WP(msg, filename)
st = time.time()
l = indxvec[p, 0]
c_gsh = (1./(2.*l+1.))*(3./(2.*np.pi**2))
if q == 0:
c_cos = 1./L_th
else:
c_cos = 2./L_th
c_tot = c_gsh*c_cos*bsz_gsh*bsz_cos
tmp = c_tot*np.sum(Y*ep_set.conj()*sinphi)
del ep_set
coeff_prt[c] = tmp
msg = "regression time: %ss" % np.round(time.time()-st, 3)
fn.WP(msg, filename)
c += 1
f.close()
f = h5py.File('coeff_prt_%s.hdf5' % tnum, 'w')
f.create_dataset('coeff_prt', data=coeff_prt)
f.close()
|
[
"[email protected]"
] | |
e1f86e42b1651b24b49a852a30e9ba287c876154
|
36126f91a2d5903483b84ba2d8be77e160803058
|
/tests/test_model.py
|
2fcf991147d46894fa7d917d389309988844fd6e
|
[
"Apache-2.0"
] |
permissive
|
open-risk/transitionMatrix
|
9962bb2656eb637ba56afc3adecf42bbe68f9593
|
d05e75cbc251f01842dd8c5ce225894b988f4d99
|
refs/heads/master
| 2023-03-05T08:01:20.816425 | 2023-02-22T20:46:38 | 2023-02-22T20:46:38 | 110,365,127 | 73 | 29 |
Apache-2.0
| 2022-12-08T11:37:12 | 2017-11-11T17:25:08 |
Python
|
UTF-8
|
Python
| false | false | 5,126 |
py
|
# encoding: utf-8
# (c) 2017-2022 Open Risk, all rights reserved
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
from scipy.linalg import expm
import transitionMatrix as tm
from transitionMatrix import source_path
ACCURATE_DIGITS = 7
class TestTransitionMatrix(unittest.TestCase):
'''
Default instance (2x2 identity matrix)
'''
def test_instantiate_matrix(self):
a = tm.TransitionMatrix()
self.assertAlmostEqual(a[0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[0, 1], 0.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 0], 0.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 1], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
b = tm.TransitionMatrix([[1.0, 3.0], [1.0, 4.0]])
self.assertAlmostEqual(b[0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(b[0, 1], 3.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(b[1, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(b[1, 1], 4.0, places=ACCURATE_DIGITS, msg=None, delta=None)
def test_csv_io(self):
a = tm.TransitionMatrix()
a.to_csv("test.csv")
b = tm.TransitionMatrix(csv_file="test.csv")
self.assertAlmostEqual(a[0, 0], b[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[0, 1], b[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 0], b[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 1], b[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
def test_json_io(self):
a = tm.TransitionMatrix()
a.to_json("test.json")
b = tm.TransitionMatrix(json_file="test.json")
self.assertAlmostEqual(a[0, 0], b[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[0, 1], b[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 0], b[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 1], b[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
def test_validation(self):
a = tm.TransitionMatrix()
self.assertEqual(a.validate(), True)
b = tm.TransitionMatrix(values=[1.0, 3.0])
self.assertEqual(b.validate()[0][0], 'Matrix Dimensions Differ: ')
c = tm.TransitionMatrix(values=[[0.75, 0.25], [0.0, 0.9]])
self.assertEqual(c.validate()[0][0], 'Rowsum not equal to one: ')
d = tm.TransitionMatrix(values=[[0.75, 0.25], [-0.1, 1.1]])
self.assertEqual(d.validate()[0][0], 'Negative Probabilities: ')
def test_generator(self):
a = tm.TransitionMatrix([[1.0, 3.0], [1.0, 4.0]])
self.assertAlmostEqual(a[0, 0], expm(a.generator())[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[0, 1], expm(a.generator())[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 0], expm(a.generator())[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 1], expm(a.generator())[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
class TestTransitionMatrixSet(unittest.TestCase):
def test_instantiate_matrix_set(self):
periods = 5
a = tm.TransitionMatrixSet(dimension=2, periods=periods)
self.assertEqual(a.temporal_type, 'Incremental')
self.assertAlmostEqual(a.entries[0][0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a.entries[periods-1][0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
pass
def test_set_validation(self):
a = tm.TransitionMatrixSet(dimension=2, periods=5)
self.assertEqual(a.validate(), True)
def test_set_cumulate_incremental(self):
a = tm.TransitionMatrix(values=[[0.6, 0.2, 0.2], [0.2, 0.6, 0.2], [0.2, 0.2, 0.6]])
a_set = tm.TransitionMatrixSet(values=a, periods=3, method='Copy', temporal_type='Incremental')
b_set = a_set
b_set.cumulate()
b_set.incremental()
self.assertAlmostEqual(a_set.entries[2][0, 0], b_set.entries[2][0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
pass
def test_set_csv_io(self):
pass
def test_set_json_io(self):
pass
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
d1df29cfcfd4dace82fa7e4e728abf9975d61641
|
94615230d5733282fb69ae5d35411c04a337d353
|
/sublime-text-3/Packages/HTML-CSS-JS Prettify/src/py/utils/constants.py
|
2d082675e21dc88f0f92e4c331ef81174d4f9007
|
[
"Unlicense"
] |
permissive
|
EnTeQuAk/dotfiles
|
fcef6a885891c3c132da3ea970dd21aee16b72c1
|
b00890fa64a01b3a0e4eaaada13e90c1ef36b9e0
|
refs/heads/master
| 2023-01-04T21:09:37.330838 | 2019-09-16T14:49:45 | 2019-09-16T14:49:45 | 1,558,950 | 1 | 0 |
Unlicense
| 2023-01-04T05:01:57 | 2011-04-02T08:31:38 |
Vim script
|
UTF-8
|
Python
| false | false | 623 |
py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Various constants used by this plugin"""
from sublime import platform, version
PLATFORM = platform()
SUBLIME_VERSION = int(version())
DIAGNOSTICS_MARKER_BEGIN = b"### HTMLPrettify diagnostics begin ###"
DIAGNOSTICS_MARKER_END = b"### HTMLPrettify diagnostics end ###"
PRETTIFIED_CODE_MARKER_BEGIN = b"### HTMLPrettify prettified code begin ###"
PRETTIFIED_CODE_MARKER_END = b"### HTMLPrettify prettified code end ###"
|
[
"[email protected]"
] | |
1b02cbd81e6d0d70e6c61416944602b6e863075c
|
e15b2ebbb9bf30a50d1e720624e9853aa269fc05
|
/CoverSlasher/items.py
|
563945642bcb4b389d94c4ea6bdfbf3d8b5cf0e0
|
[] |
no_license
|
Hodo7amShichiYA/WnacgCoverSlasher
|
e42ce1ec438558c2890d1bf34f9a192eb1ab4f81
|
5734d58caedb3defff622bb45de6cd073f8b656d
|
refs/heads/master
| 2020-04-27T16:03:02.225473 | 2019-03-08T09:05:06 | 2019-03-08T09:05:06 | 174,470,262 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 326 |
py
|
# -*- coding: utf-8 -*-
import scrapy
class CoverslasherItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
    image_urls = scrapy.Field() # stores the image URLs
    images = scrapy.Field() # stores the downloaded image info
    image_names = scrapy.Field() # stores the image names
|
[
"[email protected]"
] | |
91f92c326775e661700467fed42718d9b09d1adb
|
e53d8488ffea72db3f3618f5639f2ddfa929f11b
|
/perpustakaan/migrations/0002_delete_buku.py
|
63677b59c0a8886eaae0991e502bc4c513f0449e
|
[] |
no_license
|
writerlab/perpus
|
60473fa2b51d67617525bfc25b75656141e9529b
|
9cf7676a543c3414ac2e7fca88c3d26ac403be3b
|
refs/heads/master
| 2023-07-15T05:27:23.859264 | 2021-08-29T02:02:34 | 2021-08-29T02:02:34 | 262,490,216 | 5 | 10 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
py
|
# Generated by Django 2.2.12 on 2020-05-11 07:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('perpustakaan', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Buku',
),
]
|
[
"[email protected]"
] | |
2a930f8fc17f6a4af9fdfaeb6ff31fb3020b1552
|
6be845bf70a8efaf390da28c811c52b35bf9e475
|
/windows/Resources/Python/Core/Lib/lib2to3/main.py
|
21120209a72ee4a781dc2b7ce9223426acd4d8bd
|
[] |
no_license
|
kyeremalprime/ms
|
228194910bf2ed314d0492bc423cc687144bb459
|
47eea098ec735b2173ff0d4e5c493cb8f04e705d
|
refs/heads/master
| 2020-12-30T15:54:17.843982 | 2017-05-14T07:32:01 | 2017-05-14T07:32:01 | 91,180,709 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,385 |
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: main.py
"""
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename, '(original)', '(refactored)', lineterm='')
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
Prints output to stdout.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs):
self.nobackups = nobackups
self.show_diffs = show_diffs
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
if not self.nobackups:
backup = filename + '.bak'
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error as err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error as err:
self.log_message("Can't rename %s to %s", filename, backup)
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message('No changes to %s', filename)
else:
self.log_message('Refactored %s', filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print line
sys.stdout.flush()
else:
for line in diff_lines:
print line
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" % (
filename,))
return
return
def warn(msg):
print >> sys.stderr, 'WARNING: %s' % (msg,)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
parser = optparse.OptionParser(usage='2to3 [options] file|dir ...')
parser.add_option('-d', '--doctests_only', action='store_true', help='Fix up doctests only')
parser.add_option('-f', '--fix', action='append', default=[], help='Each FIX specifies a transformation; default: all')
parser.add_option('-j', '--processes', action='store', default=1, type='int', help='Run 2to3 concurrently')
parser.add_option('-x', '--nofix', action='append', default=[], help='Prevent a transformation from being run')
parser.add_option('-l', '--list-fixes', action='store_true', help='List available transformations')
parser.add_option('-p', '--print-function', action='store_true', help='Modify the grammar so that print() is a function')
parser.add_option('-v', '--verbose', action='store_true', help='More verbose logging')
parser.add_option('--no-diffs', action='store_true', help="Don't show diffs of the refactoring")
parser.add_option('-w', '--write', action='store_true', help='Write back modified files')
parser.add_option('-n', '--nobackups', action='store_true', default=False, help="Don't write backups for modified files")
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print 'Available transformations for the -f/--fix option:'
for fixname in refactor.get_all_fix_names(fixer_pkg):
print fixname
if not args:
return 0
if not args:
print >> sys.stderr, 'At least one file or directory argument required.'
print >> sys.stderr, 'Use --help to show usage.'
return 2
if '-' in args:
refactor_stdin = True
if options.write:
print >> sys.stderr, "Can't write to stdin."
return 2
if options.print_function:
flags['print_function'] = True
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set((fixer_pkg + '.fix_' + fix for fix in options.nofix))
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == 'all':
all_present = True
else:
explicit.add(fixer_pkg + '.fix_' + fix)
        requested = avail_fixes.union(explicit) if all_present else explicit
    else:
        requested = avail_fixes
fixer_names = requested.difference(unwanted_fixes)
rt = StdoutRefactoringTool(sorted(fixer_names), flags, sorted(explicit), options.nobackups, not options.no_diffs)
if rt.errors or refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only, options.processes)
except refactor.MultiprocessingUnsupported:
print >> sys.stderr, "Sorry, -j isn't supported on this platform."
return 1
rt.summarize()
return int(bool(rt.errors))
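# Usage sketch: the 2to3 console script typically drives this module with
# something like the following (shown here as an assumption, not part of this
# file):
#
#     sys.exit(main("lib2to3.fixes"))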
|
[
"[email protected]"
] | |
f92227c51ec1996e3e31c2e0073f8916609625b5
|
e4bab7fc4e8eacb62ad35b4b58b9a5093bae44c7
|
/spec/rift/data/models/tenant.py
|
a7a4d5803847af6a42bab44665e8cd139f2cfdba
|
[
"Apache-2.0"
] |
permissive
|
mkam/Rift
|
972d5c571ead01480519509b783ec70b0636d10f
|
802892f7c119845e0f2ec5b0798463f210e7061f
|
refs/heads/master
| 2021-06-01T12:33:58.147207 | 2015-08-27T19:35:51 | 2015-08-27T19:35:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 414 |
py
|
import uuid
from specter import Spec, expect
from rift.data.models.tenant import Tenant
class TenantModel(Spec):
def can_convert_to_dictionary(self):
tmp_uuid = str(uuid.uuid4())
tenant = Tenant(name=tmp_uuid, tenant_id=tmp_uuid)
tenant_dict = tenant.as_dict()
test_dict = Tenant.build_tenant_from_dict(tenant_dict).as_dict()
expect(tenant_dict).to.equal(test_dict)
|
[
"[email protected]"
] | |
c6181955ae958e8e09f7d70d6cabc46465b949a8
|
9930f08717594022e0f7fde2a96baaa7fcfce784
|
/assignment3_prime number or not.py
|
c6ba297b1ad4aab747f95704e77a94145abc75b2
|
[] |
no_license
|
dinesh5555/python_assignments
|
72bd2d1cc35a92a01826536eeb4107953d8d73c7
|
33fbcbe1de8f92bd6ffe07fa66640ce1ab84a756
|
refs/heads/master
| 2022-11-11T18:42:41.621053 | 2020-07-03T09:12:49 | 2020-07-03T09:12:49 | 276,854,185 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 250 |
py
|
#!/usr/bin/env python
# coding: utf-8
# In[19]:
num=int(input("enter a number"))
if num>1:
for i in range(2,num):
if (num%i)==0:
print(num,"is not a prime number")
break
else:
print(num,"is a prime number")
|
[
"[email protected]"
] | |
7f78820ec7b0fb9e06f0c1b1bdf198ef2b2cabe4
|
77900cdd9a815caf1cd04705321ca93f5072179f
|
/Project2/Project2/.history/blog/views_20211114212413.py
|
9aa8a5a017e67a14ca1de498d7a75944f750328a
|
[] |
no_license
|
Bom19990111/helloword_python
|
717799d994223d65de5adaeabecf396ff2bc1fb7
|
2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7
|
refs/heads/master
| 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 |
Python
|
UTF-8
|
Python
| false | false | 486 |
py
|
from django.shortcuts import get_object_or_404, render
from .models import Blog
# Create your views here.
def all_blogs(request):
blogs = Blog.objects.filter(status=1).order_by('-created_on')
return render(request, 'blog/all_blogs.html', {'blogs': blogs})
def detail(request, slug):
blog = get_object_or_404(Blog, slug_title=slug)
    # assumed fix: the draft referenced an undefined `app_movies` and had an
    # unreachable second return; render the blog detail template instead.
    return render(request, 'blog/detail.html', {'blog': blog})
|
[
"[email protected]"
] | |
73a769860358682887712313fed38e62177e3612
|
55815c281f6746bb64fc2ba46d074ca5af966441
|
/medium/299.py
|
4b6e196e858ad7ee70a73d1e40dbf8f868f06bf8
|
[] |
no_license
|
brandoneng000/LeetCode
|
def5107b03187ad7b7b1c207d39c442b70f80fc2
|
c7a42753b2b16c7b9c66b8d7c2e67b683a15e27d
|
refs/heads/master
| 2023-08-30T23:38:04.845267 | 2023-08-30T08:42:57 | 2023-08-30T08:42:57 | 199,584,584 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 670 |
py
|
import collections
class Solution:
def getHint(self, secret: str, guess: str) -> str:
cows = 0
bulls = 0
for i in range(len(secret)):
if secret[i] == guess[i]:
bulls += 1
secret_counter = collections.Counter(secret)
guess_counter = collections.Counter(guess)
cows = sum(secret_counter.values()) - sum((secret_counter - guess_counter).values()) - bulls
return f"{bulls}A{cows}B"
def main():
sol = Solution()
print(sol.getHint(secret = "1807", guess = "7810"))
print(sol.getHint(secret = "1123", guess = "0111"))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
610dffc1617cd22bd7b8e889c292a9d1ef1e3346
|
677002b757c0a1a00b450d9710a8ec6aeb9b9e9a
|
/tiago_public_ws/build/tiago_bringup/catkin_generated/pkg.develspace.context.pc.py
|
dd49ac14d12608a0d4daa23f54bf5fb2b0e9670f
|
[] |
no_license
|
mrrocketraccoon/tiago_development
|
ce686c86459dbfe8623aa54cf4279021342887fb
|
a0539bdcf21b67ab902a4649b516dcb929c54042
|
refs/heads/main
| 2023-06-16T19:39:33.391293 | 2021-07-08T21:20:03 | 2021-07-08T21:20:03 | 384,249,894 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tiago_bringup"
PROJECT_SPACE_DIR = "/tiago_public_ws/devel/.private/tiago_bringup"
PROJECT_VERSION = "2.0.58"
|
[
"[email protected]"
] | |
4af74a933a0a3e003647d10696dc9afb71b9e739
|
080c13cd91a073457bd9eddc2a3d13fc2e0e56ae
|
/MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/framework/experimental/math_ops.py
|
bb168de21996ba1f8aa825afb66054bf16c1f338
|
[
"Apache-2.0"
] |
permissive
|
Portfolio-Projects42/UsefulResourceRepo2.0
|
1dccc8961a09347f124d3ed7c27c6d73b9806189
|
75b1e23c757845b5f1894ebe53551a1cf759c6a3
|
refs/heads/master
| 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,746 |
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental impl for gen_math_ops.py using unified APIs, for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework.experimental import _math_ops
from tensorflow.python.framework.experimental import context_stack as context
def add(a, b, name=None):
ctx = context.get_default()
return _math_ops.add(ctx, a, b, name)
def mat_mul(a, b, name=None):
ctx = context.get_default()
return _math_ops.mat_mul(ctx, a, b, name)
def neg(a, name=None):
ctx = context.get_default()
return _math_ops.neg(ctx, a, name)
def sub(a, b, name=None):
ctx = context.get_default()
return _math_ops.sub(ctx, a, b, name)
def mul(a, b, name=None):
ctx = context.get_default()
return _math_ops.mul(ctx, a, b, name)
def log1p(a, name=None):
ctx = context.get_default()
return _math_ops.log1p(ctx, a, name)
def div_no_nan(a, b, name=None):
ctx = context.get_default()
return _math_ops.div_no_nan(ctx, a, b, name)
|
[
"[email protected]"
] | |
29d682db283e2dc08722ab6dd840796f0e982a94
|
e67a0139092d3389fea0075de9ecf12ab209649f
|
/scripts/addons_extern/AF_3dview_specials/VIEW3D_MT_armature_specials.py
|
47b9a6cae734433af79d7e3a2b6eef5aca78063f
|
[] |
no_license
|
amagnoni/blenderpython
|
9fe864d287f992b7cd71cd584fca4a501a6ac954
|
d2fec1a35369b7b171e2f0999196b87e242e08f3
|
refs/heads/master
| 2021-01-18T11:28:55.372759 | 2015-10-17T20:16:57 | 2015-10-17T20:16:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,882 |
py
|
# 3D View > Armature Edit Mode > 'W' key
import bpy
import re
################
# Operators    #
################
class CreateMirror(bpy.types.Operator):
bl_idname = "armature.create_mirror"
    bl_label = "Mirror selected bones"
    bl_description = "Duplicate the selected bones mirrored across the X axis"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
obj = context.active_object
if (obj.type == "ARMATURE"):
if (obj.mode == "EDIT"):
preCursorCo = context.space_data.cursor_location[:]
prePivotPoint = context.space_data.pivot_point
preUseMirror = context.object.data.use_mirror_x
context.space_data.cursor_location = (0, 0, 0)
context.space_data.pivot_point = 'CURSOR'
context.object.data.use_mirror_x = True
selectedBones = context.selected_bones[:]
bpy.ops.armature.autoside_names(type='XAXIS')
bpy.ops.armature.duplicate()
axis = (True, False, False)
bpy.ops.transform.mirror(constraint_axis=axis)
bpy.ops.armature.flip_names()
newBones = []
for bone in context.selected_bones:
for pre in selectedBones:
if (bone.name == pre.name):
break
else:
newBones.append(bone)
bpy.ops.armature.select_all(action='DESELECT')
for bone in selectedBones:
bone.select = True
bone.select_head = True
bone.select_tail = True
bpy.ops.transform.transform(mode='BONE_ROLL', value=(0, 0, 0, 0))
bpy.ops.armature.select_all(action='DESELECT')
for bone in newBones:
bone.select = True
bone.select_head = True
bone.select_tail = True
context.space_data.cursor_location = preCursorCo[:]
context.space_data.pivot_point = prePivotPoint
context.object.data.use_mirror_x = preUseMirror
else:
self.report(type={"ERROR"}, message="Please perform in edit mode")
else:
self.report(type={"ERROR"}, message="Armature object is not")
return {'FINISHED'}
class CopyBoneName(bpy.types.Operator):
bl_idname = "armature.copy_bone_name"
    bl_label = "Copy bone name to clipboard"
    bl_description = "Copies the name of the active bone to the clipboard"
bl_options = {'REGISTER', 'UNDO'}
isObject = bpy.props.BoolProperty(name="Object name", default=False)
def execute(self, context):
if (self.isObject):
context.window_manager.clipboard = context.active_object.name + ":" + context.active_bone.name
else:
context.window_manager.clipboard = context.active_bone.name
return {'FINISHED'}
class RenameBoneRegularExpression(bpy.types.Operator):
bl_idname = "armature.rename_bone_regular_expression"
    bl_label = "Rename bones with a regular expression"
    bl_description = "Replace regular-expression matches in the selected bone names"
bl_options = {'REGISTER', 'UNDO'}
isAll = bpy.props.BoolProperty(name="Including non-select all", default=False)
pattern = bpy.props.StringProperty(name="Replacement front (in regular expressions)", default="^")
repl = bpy.props.StringProperty(name="Replacement", default="@")
def execute(self, context):
obj = context.active_object
if (obj.type == "ARMATURE"):
if (obj.mode == "EDIT"):
bones = context.selected_bones
if (self.isAll):
bones = obj.data.bones
for bone in bones:
try:
new_name = re.sub(self.pattern, self.repl, bone.name)
except:
continue
bone.name = new_name
else:
self.report(type={"ERROR"}, message="Please perform in edit mode")
else:
self.report(type={"ERROR"}, message="Armature object is not")
return {'FINISHED'}
class RenameOppositeBone(bpy.types.Operator):
bl_idname = "armature.rename_opposite_bone"
    bl_label = "Rename bones at the mirrored position"
    bl_description = "Give bones located at the X-mirrored position of the selected bones the matching paired name (e.g. '1.R' / '1.L')"
bl_options = {'REGISTER', 'UNDO'}
threshold = bpy.props.FloatProperty(name="At the threshold", default=0.00001, min=0, soft_min=0, step=0.001, precision=5)
def execute(self, context):
obj = context.active_object
if (obj.type == "ARMATURE"):
if (obj.mode == "EDIT"):
arm = obj.data
bpy.ops.armature.autoside_names(type='XAXIS')
selectedBones = context.selected_bones[:]
bpy.ops.armature.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
threshold = self.threshold
for bone in selectedBones:
bone = arm.bones[bone.name]
head = (-bone.head_local[0], bone.head_local[1], bone.head_local[2])
tail = (-bone.tail_local[0], bone.tail_local[1], bone.tail_local[2])
for b in arm.bones:
if ( (head[0]-threshold) <= b.head_local[0] <= (head[0]+threshold)):
if ( (head[1]-threshold) <= b.head_local[1] <= (head[1]+threshold)):
if ( (head[2]-threshold) <= b.head_local[2] <= (head[2]+threshold)):
if ( (tail[0]-threshold) <= b.tail_local[0] <= (tail[0]+threshold)):
if ( (tail[1]-threshold) <= b.tail_local[1] <= (tail[1]+threshold)):
if ( (tail[2]-threshold) <= b.tail_local[2] <= (tail[2]+threshold)):
b.name = bone.name
b.select = True
b.select_head = True
b.select_tail = True
break
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.armature.flip_names()
else:
self.report(type={"ERROR"}, message="Please perform in edit mode")
else:
self.report(type={"ERROR"}, message="Armature object is not")
return {'FINISHED'}
# Menu
def menu(self, context):
self.layout.separator()
self.layout.prop(context.object.data, "use_mirror_x", icon="PLUGIN", text="X axis mirror edit")
self.layout.operator(CreateMirror.bl_idname, icon="PLUGIN")
self.layout.operator(RenameOppositeBone.bl_idname, icon="PLUGIN")
self.layout.separator()
self.layout.operator(CopyBoneName.bl_idname, icon="PLUGIN")
self.layout.operator(RenameBoneRegularExpression.bl_idname, icon="PLUGIN")
|
[
"[email protected]"
] | |
14258aac0b0a7e639801a834cddbdf0089e45ea8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_240/ch140_2020_04_01_19_19_15_085921.py
|
4c86827b303c447f074e6de350d868de44996204
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 204 |
py
|
def faixa_notas(notas):
a = 0
b = 0
c = 0
for i in notas:
if i < 5:
a += 1
elif i <= 7:
b += 1
else:
c += 1
return [a ,b ,c]
|
[
"[email protected]"
] | |
9d4fa0a1977714a0290798e157f5b22310e8461f
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/hdinsight/v20210601/_enums.py
|
edcf6d44105644a4c49b3c3c4900f7f15073a76f
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,314 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'DaysOfWeek',
'DirectoryType',
'JsonWebKeyEncryptionAlgorithm',
'OSType',
'PrivateIPAllocationMethod',
'PrivateLink',
'PrivateLinkServiceConnectionStatus',
'ResourceIdentityType',
'ResourceProviderConnection',
'Tier',
]
class DaysOfWeek(str, Enum):
MONDAY = "Monday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
THURSDAY = "Thursday"
FRIDAY = "Friday"
SATURDAY = "Saturday"
SUNDAY = "Sunday"
class DirectoryType(str, Enum):
"""
The directory type.
"""
ACTIVE_DIRECTORY = "ActiveDirectory"
class JsonWebKeyEncryptionAlgorithm(str, Enum):
"""
Algorithm identifier for encryption, default RSA-OAEP.
"""
RS_A_OAEP = "RSA-OAEP"
RS_A_OAE_P_256 = "RSA-OAEP-256"
RSA1_5 = "RSA1_5"
class OSType(str, Enum):
"""
The type of operating system.
"""
WINDOWS = "Windows"
LINUX = "Linux"
class PrivateIPAllocationMethod(str, Enum):
"""
The method that private IP address is allocated.
"""
DYNAMIC = "dynamic"
STATIC = "static"
class PrivateLink(str, Enum):
"""
Indicates whether or not private link is enabled.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class PrivateLinkServiceConnectionStatus(str, Enum):
"""
The concrete private link service connection.
"""
APPROVED = "Approved"
REJECTED = "Rejected"
PENDING = "Pending"
REMOVED = "Removed"
class ResourceIdentityType(str, Enum):
"""
The type of identity used for the cluster. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
class ResourceProviderConnection(str, Enum):
"""
The direction for the resource provider connection.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class Tier(str, Enum):
"""
The cluster tier.
"""
STANDARD = "Standard"
PREMIUM = "Premium"
|
[
"[email protected]"
] | |
77b74a14ad5a1874eb757c258db26fc759163437
|
43e900f11e2b230cdc0b2e48007d40294fefd87a
|
/laioffer/remove-certain-characters.py
|
ba414a70cff9fca8d6bb41e33f8626f682e9c25a
|
[] |
no_license
|
DarkAlexWang/leetcode
|
02f2ed993688c34d3ce8f95d81b3e36a53ca002f
|
89142297559af20cf990a8e40975811b4be36955
|
refs/heads/master
| 2023-01-07T13:01:19.598427 | 2022-12-28T19:00:19 | 2022-12-28T19:00:19 | 232,729,581 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 496 |
py
|
class Solution:
def remove(self, string, t):
array = list(string)
uniq_t = set(t)
slow = 0
fast = 0
for fast in range(0, len(array)):
if array[fast] not in uniq_t:
array[slow] = array[fast]
slow += 1
res = ""
for i in range(slow):
res += array[i]
return res
if __name__ == "__main__":
solution = Solution()
res = solution.remove("aaabbbccc", "a")
print(res)
|
[
"[email protected]"
] | |
e856c3512502cc8ddd31849054c4633d661bca3c
|
9d6271fd3851acb797a5120e0d884130f7548833
|
/kmeans.py
|
4950fd689a0074d89dbcfb3e82ec63e3d12597e9
|
[] |
no_license
|
Wenbin94/toolbox
|
f5d69e1b3a158ad076562829e2d83738e282da04
|
e88e1ba51e5a4c963626000b434072b6aa64e09d
|
refs/heads/master
| 2020-08-22T02:50:57.779313 | 2019-10-08T10:57:52 | 2019-10-08T10:57:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,233 |
py
|
'''
2019.10.8 ming71
Purpose: k-means clustering of bounding boxes
Notes:
- The stopping condition is an unchanged index of the minimum rather than an unchanged minimum value, which can cause early stopping; this can be changed
- Only VOC annotations are supported for now
- To support other formats, just rewrite the get_all_boxes function
'''
import numpy as np
import glob
import os
from decimal import Decimal
class Kmeans:
def __init__(self, cluster_number, all_boxes, save_path):
self.cluster_number = cluster_number
self.all_boxes = all_boxes
self.save_path = save_path
    # Input: two 2-D arrays, all boxes and the seed-point (cluster-center) boxes
    # Output: a result of shape [num_boxes, k]
def iou(self, boxes, clusters): # 1 box -> k clusters
n = boxes.shape[0]
        k = self.cluster_number  # number of clusters
        box_area = boxes[:, 0] * boxes[:, 1]  # slice: multiply column 0 by column 1 of every row, giving a row vector of gt areas
        box_area = box_area.repeat(k)  # repeat the row vector
        box_area = np.reshape(box_area, (n, k))
        cluster_area = clusters[:, 0] * clusters[:, 1]  # row vector of seed-point areas
cluster_area = np.tile(cluster_area, [1, n])
cluster_area = np.reshape(cluster_area, (n, k))
box_w_matrix = np.reshape(boxes[:, 0].repeat(k), (n, k))
cluster_w_matrix = np.reshape(np.tile(clusters[:, 0], (1, n)), (n, k))
min_w_matrix = np.minimum(cluster_w_matrix, box_w_matrix)
box_h_matrix = np.reshape(boxes[:, 1].repeat(k), (n, k))
cluster_h_matrix = np.reshape(np.tile(clusters[:, 1], (1, n)), (n, k))
min_h_matrix = np.minimum(cluster_h_matrix, box_h_matrix)
inter_area = np.multiply(min_w_matrix, min_h_matrix)
result = inter_area / (box_area + cluster_area - inter_area + 1e-16)
        assert (result>0).all() == True , 'negative anchors present , cluster again!'
return result
def avg_iou(self, boxes, clusters):
accuracy = np.mean([np.max(self.iou(boxes, clusters), axis=1)])
return accuracy
    # Note: the stopping condition chosen here is that the index of the minimum stays unchanged, not that the seed-point values stop changing, which makes the error slightly larger.
def kmeans(self, boxes, k, dist=np.median):
        box_number = boxes.shape[0]  # number of boxes
        distances = np.empty((box_number, k))  # [box_number, k] array holding the custom IoU distance of each box to each anchor
        last_nearest = np.zeros((box_number,))  # vector of length box_number
        np.random.seed()
        clusters = boxes[np.random.choice(
            box_number, k, replace=False)]  # random initialization of the seed points
        # duplicated seed points cause computation errors, so avoid them!
while True :
uniques_clusters = np.unique(clusters,axis=0)
if len(uniques_clusters)==len(clusters) :
break
clusters = boxes[np.random.choice(box_number, k, replace=False)]
# k-means
while True:
            # Each iteration: compute every box's custom distance to the k seed points,
            # assign each box to its nearest seed point, recompute the cluster centers, then repeat
            distances = 1 - self.iou(boxes, clusters)  # the larger the IoU, the smaller the distance
            current_nearest = np.argmin(distances, axis=1)  # length-box_number vector: which seed-point class (0..k-1) each box currently belongs to
            if (last_nearest == current_nearest).all():  # every box kept the same class as last round, stop moving the clusters
                break
            # compute the new coordinates of the k seed points
            for cluster in range(k):
                clusters[cluster] = dist(boxes[current_nearest == cluster], axis=0)  # only shift the seed points that still need updating
last_nearest = current_nearest
return clusters
def result2txt(self, data):
f = open(self.save_path, 'w')
row = np.shape(data)[0]
for i in range(row):
if i == 0:
x_y = "%d,%d" % (data[i][0], data[i][1])
else:
x_y = ", %d,%d" % (data[i][0], data[i][1])
f.write(x_y)
        f.close()  # the final output is w1,h1,w2,h2,w3,h3,...
def clusters(self):
        all_boxes = np.array(self.all_boxes)  # 2-D array of the widths and heights of all gt boxes
        result = self.kmeans(all_boxes, k=self.cluster_number)  # pass all gt width/height pairs and the seed-point count; returns a k x 2 clustering result
        result = result[np.lexsort(result.T[0, None])]  # re-sort the resulting anchors by width, ascending
self.result2txt(result)
print("K anchors:\n {}".format(result))
print("Accuracy: {:.2f}%".format(
self.avg_iou(all_boxes, result) * 100))
# return the boxes from all labels, in the form [[w1,h1],[w2,h2],...]
def get_all_boxes(path):
mode = 'voc'
boxes = []
labels = sorted(glob.glob(os.path.join(path, '*.*')))
for label in labels:
with open(label,'r') as f:
contents = f.read()
objects = contents.split('<object>')
objects.pop(0)
            assert len(objects) > 0, 'No object found in ' + label
for object in objects:
xmin = int(object[object.find('<xmin>')+6 : object.find('</xmin>')])
xmax = int(object[object.find('<xmax>')+6 : object.find('</xmax>')])
ymin = int(object[object.find('<ymin>')+6 : object.find('</ymin>')])
ymax = int(object[object.find('<ymax>')+6 : object.find('</ymax>')])
box_w = xmax - xmin
box_h = ymax - ymin
boxes.append((box_w,box_h))
return boxes
if __name__ == "__main__":
    cluster_number = 9 # number of seed points, i.e. the number of anchors
label_path = r'/py/datasets/ship/tiny_ships/yolo_ship/train_labels'
save_path = r'/py/yolov3/cfg/anchor-cluster.txt'
all_boxes = get_all_boxes(label_path)
kmeans = Kmeans(cluster_number, all_boxes,save_path)
kmeans.clusters()
|
[
"[email protected]"
] | |
65b8808ec3e1a0d27451e396ee0d6a134cdabb91
|
a98cab2f9c24a85a5f46b2cbec7506b79f4ea634
|
/app/src/models/sentence_model.py
|
1588e64f2771d2064366357aaa9e173d0246e6a2
|
[] |
no_license
|
DIS-SIN/ODSC-2019
|
b8b8d10b41d95925219a0be36b5ef8b541396681
|
c2a606471452e358f0e245841e78f562c570bbf5
|
refs/heads/master
| 2020-05-17T18:27:08.591454 | 2019-04-30T03:02:35 | 2019-04-30T03:02:35 | 183,884,786 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
from neomodel import (
StructuredNode,
StringProperty,
DateTimeProperty,
UniqueIdProperty
)
from datetime import datetime
class Sentence(StructuredNode):
nodeId = UniqueIdProperty()
sentence = StringProperty(required=True)
addedOn = DateTimeProperty(default_now=True)
updatedOn = DateTimeProperty()
sentimentScore = StringProperty()
magnitudeScore = StringProperty()
def pre_save(self):
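        # neomodel hook: invoked automatically before each save, so updatedOn
        # always reflects the time of the most recent write (assumed semantics
        # of StructuredNode.pre_save).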
self.updatedOn = datetime.utcnow()
|
[
"[email protected]"
] | |
184895f8be106c50e7efd39383cec128bad28d48
|
8780bc7f252f14ff5406ce965733c099034920b7
|
/pyCode/pagesize/pagesize/wsgi.py
|
dfa911c708e95d9a1ec3be3f2f82bcdcfb628314
|
[] |
no_license
|
13661892653/workspace
|
5e4e458d31b9355c67d67ba7d9faccbcc1ac9f6b
|
17960becabb3b4f0fc30009c71a11c4f7a5f8330
|
refs/heads/master
| 2020-12-24T20:00:15.541432 | 2018-08-14T13:56:15 | 2018-08-14T13:56:15 | 86,225,975 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 394 |
py
|
"""
WSGI config for pagesize project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pagesize.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
72400fc6b6ffce55fbd9162fc62cecddf26120d2
|
d8169f7c2efdeb40fe9dcdd59ce040138804d2af
|
/2nd/mysite/settings.py
|
b80fdf7c56fd69ac13acd7925dd80038a10abed8
|
[] |
no_license
|
KimDoKy/pyDjango
|
d9ab67b6da6541ebd04658945922d9924a85b107
|
53ef776dd20488f0dfda6b7e3fd5281e8f3e98fd
|
refs/heads/master
| 2020-12-30T13:08:15.951633 | 2017-10-04T10:01:15 | 2017-10-04T10:01:15 | 91,325,691 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,604 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*kr)qf=3+unt7*9chabk@bc#(esu0cs8_o)nqgg8!e%crpv@5+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookmark.apps.BookmarkConfig',
'blog.apps.BlogConfig',
'tagging.apps.TaggingConfig',
'disqus',
'django.contrib.sites',
'photo.apps.PhotoConfig',
]
DISQUS_WEBSITE_SHORTNAME = 'dokys-blog'
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#LOGIN_URL = '/accounts/login/'
#LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/'
|
[
"[email protected]"
] | |
52bc7f128792a60754a8768605b64ec973f3a0b1
|
c61f41a8655b39098ffa257fb994979d17dfb10c
|
/cremilda/parser.py
|
f9fd5a4c5ff50d4043307e03b5cbf47de4a5c04b
|
[] |
no_license
|
luizpaulosilva/compiladores-1
|
48f09085c0f61b2f1bea0507adde9a03473b2d23
|
f553d9de0b6cd764d11bd533cec6bde9877d6587
|
refs/heads/master
| 2020-03-18T10:50:01.200756 | 2018-05-03T17:13:41 | 2018-05-03T17:13:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,079 |
py
|
import ox
from .lexer import tokens
from .ast import BinOp, FCall, Atom, Assign
def make_parser():
return ox.make_parser([
('module : statement SEMICOLON', lambda x, _: [x]),
('module : statement SEMICOLON module', statements),
('statement : NAME EQ expr', var_def),
('expr : atom OP expr', op_call),
('expr : atom', identity),
('atom : NUMBER', lambda x: Atom(float(x))),
('atom : STRING', lambda x: Atom(x[1:-1])),
('atom : BOOL', lambda x: Atom(x == 'true')),
('atom : LPAR expr RPAR', lambda x, y, z: y),
('atom : fcall', identity),
('fcall : NAME LPAR RPAR', lambda x, y, z: FCall(x, [])),
('fcall : NAME LPAR args RPAR', fcall),
('args : expr COMMA args', lambda x, _, xs: [x, *xs]),
('args : expr', lambda x: [x]),
], tokens=tokens)
# Helper functions
identity = (lambda x: x)
op_call = (lambda x, op, y: BinOp(op, x, y))
fcall = (lambda x, y, z, w: FCall(x, z))
statements = (lambda x, _, xs: [x, *xs])
var_def = (lambda name, eq, expr: Assign(name, expr))
# Build the parser
parser = make_parser()
|
[
"[email protected]"
] | |
51ed26d155d3ac70a5b01ef59f20d79a642bf07f
|
ef6229d281edecbea3faad37830cb1d452d03e5b
|
/ucsmsdk/mometa/adaptor/AdaptorIscsiAuth.py
|
85653c123f5847d9bf6701d752efdd160c69cfe0
|
[
"Apache-2.0"
] |
permissive
|
anoop1984/python_sdk
|
0809be78de32350acc40701d6207631322851010
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
refs/heads/master
| 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,402 |
py
|
"""This module contains the general information for AdaptorIscsiAuth ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class AdaptorIscsiAuthConsts():
pass
class AdaptorIscsiAuth(ManagedObject):
"""This is AdaptorIscsiAuth class."""
consts = AdaptorIscsiAuthConsts()
naming_props = set([])
mo_meta = MoMeta("AdaptorIscsiAuth", "adaptorIscsiAuth", "iscsi-auth", VersionMeta.Version201m, "InputOutput", 0x1f, [], ["read-only"], [], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"password": MoPropertyMeta("password", "password", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version201m, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"user_id": MoPropertyMeta("user_id", "userId", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"password": "password",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"userId": "user_id",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.password = None
self.sacl = None
self.status = None
self.user_id = None
ManagedObject.__init__(self, "AdaptorIscsiAuth", parent_mo_or_dn, **kwargs)
|
[
"[email protected]"
] |