Dataset schema (one record per source file; the fields appear in this order, pipe-separated, in each record below):

| Column | Type | Lengths / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 251 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 251 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 251 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |
0ab4aa21cfd4871d1766355bdd0923074d0f5c05 | 32,515 | py | Python | gpMgmt/bin/gpload_test/gpload2/TEST.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2020-07-08T13:20:27.000Z | 2020-07-08T13:20:27.000Z | gpMgmt/bin/gpload_test/gpload2/TEST.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | [
"PostgreSQL",
"Apache-2.0"
] | 6 | 2020-06-24T18:56:06.000Z | 2022-02-26T08:53:11.000Z | gpMgmt/bin/gpload_test/gpload2/TEST.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import unittest
import sys
import os
import string
import time
import socket
import fileinput
import platform
import re
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
import pg
"""
Global Values
"""
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
UPD = os.path.abspath(mkpath('..'))
if UPD not in sys.path:
sys.path.append(UPD)
DBNAME = "postgres"
USER = os.environ.get( "LOGNAME" )
HOST = socket.gethostname()
GPHOME = os.getenv("GPHOME")
PGPORT = get_port()
PGUSER = os.environ.get("PGUSER")
if PGUSER is None:
PGUSER = USER
PGHOST = os.environ.get("PGHOST")
if PGHOST is None:
PGHOST = HOST
d = mkpath('config')
if not os.path.exists(d):
os.mkdir(d)
def psql_run(ifile = None, ofile = None, cmd = None,
flag = '-e',dbname = None,
username = None,
PGOPTIONS = None, host = None, port = None):
'''
Run a command or file against psql. Return True if OK.
@param dbname: database name
@param ifile: input file
@param cmd: command line
@param flag: -e Run SQL with no comments (default)
-a Run SQL with comments and psql notice
@param username: psql user
@param host : to connect to a different host
@param port : port where gpdb is running
@param PGOPTIONS: connects to postgres via utility mode
'''
if dbname is None:
dbname = DBNAME
if username is None:
username = PGUSER # Use the default login user
if PGOPTIONS is None:
PGOPTIONS = ""
else:
PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS
if host is None:
host = "-h %s" % PGHOST
else:
host = "-h %s" % host
if port is None:
port = ""
else:
port = "-p %s" % port
if cmd:
arg = '-c "%s"' % cmd
elif ifile:
arg = ' < ' + ifile
if not (flag == '-q'): # Don't echo commands sent to server
arg = '-e < ' + ifile
if flag == '-a':
arg = '-f ' + ifile
else:
raise PSQLError('missing cmd and ifile')
if ofile == '-':
ofile = '2>&1'
elif not ofile:
ofile = '> /dev/null 2>&1'
else:
ofile = '> %s 2>&1' % ofile
return run('%s psql -d %s %s %s -U %s %s %s %s' %
(PGOPTIONS, dbname, host, port, username, flag, arg, ofile))
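# Illustrative psql_run calls (file names here are hypothetical, not part of
# this suite): run a single statement, or execute a SQL file quietly.
#   psql_run(cmd='SELECT version();', ofile='-', dbname=DBNAME, port=PGPORT)
#   psql_run(ifile=mkpath('setup.sql'), ofile=mkpath('setup.out'), flag='-q')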
def run(cmd):
"""
    Run a shell command. Return (True, [output]) if the command exits with
    status 0, or (False, [output]) otherwise.
    @param cmd: The command to run at the shell.
"""
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out = p.communicate()[0]
ret = []
ret.append(out)
rc = False if p.wait() else True
return (rc,ret)
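# Example of the run() contract: run('echo hello') returns (True, [captured stdout]);
# a command that exits with a non-zero status returns (False, [captured stdout]).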
def read_diff(ifile, outputPath):
"""
    Opens the diff file that is associated with the given input file and returns
its contents as a string.
"""
dfile = diffFile(ifile, outputPath)
with open(dfile, 'r') as diff:
return diff.read()
hostNameAddrs = get_ip(HOST)
masterPort = getPortMasterOnly()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase)
runner = unittest.TextTestRunner(verbosity=2)
ret = not runner.run(suite).wasSuccessful()
sys.exit(ret)
| 42.172503 | 421 | 0.611041 |
0ab4e78536a96c9504186fa7b02c118e2936a403 | 1,406 | py | Python | code_week19_831_96/biao_shi_shu_zi.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week19_831_96/biao_shi_shu_zi.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week19_831_96/biao_shi_shu_zi.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | '''
"+100""5e2""-123""3.1416""-1E-16""0123""12e""1a3.14""1.2.3""+-5""12e+5.4"
LeetCode
https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof
'''
| 45.354839 | 129 | 0.345661 |
0ab611b64794b954266ea15a077d39ba3447ef27 | 13,211 | py | Python | teeth_overlord/tests/unit/networks/neutron.py | rackerlabs/teeth-overlord | d76f6a03853d964b556aa1aa0f7011b4d1a6f208 | [
"Apache-2.0"
] | null | null | null | teeth_overlord/tests/unit/networks/neutron.py | rackerlabs/teeth-overlord | d76f6a03853d964b556aa1aa0f7011b4d1a6f208 | [
"Apache-2.0"
] | null | null | null | teeth_overlord/tests/unit/networks/neutron.py | rackerlabs/teeth-overlord | d76f6a03853d964b556aa1aa0f7011b4d1a6f208 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2013 Rackspace, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from teeth_overlord import config
from teeth_overlord.networks import neutron
from teeth_overlord import tests
from keystoneclient.apiclient import exceptions as keystone_exceptions
from keystoneclient.v2_0 import client as keystone_client
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.neutron import client as neutron_client
NETWORK1_RESPONSE = {
u'status': u'ACTIVE',
u'subnets': [u'SUBNET1'],
u'name': u'private',
u'provider:physical_network': None,
u'admin_state_up': True,
u'tenant_id': u'TENANTID',
u'provider:network_type': u'local',
u'router:external': False,
u'shared': False,
u'id': u'NETWORK1',
u'provider:segmentation_id': None
}
NETWORK2_RESPONSE = {
u'status': u'ACTIVE',
u'subnets': [u'SUBNET2'],
u'name': u'public',
u'provider:physical_network': None,
u'admin_state_up': True,
u'tenant_id': u'TENANTID',
u'provider:network_type': u'local',
u'router:external': True,
u'shared': False,
u'id': u'NETWORK2',
u'provider:segmentation_id': None
}
PORT1_RESPONSE = {
u'status': u'ACTIVE',
u'binding:host_id': u'precise64',
u'name': u'',
u'allowed_address_pairs': [],
u'admin_state_up': True,
u'network_id': u'NETWORK1',
u'tenant_id': u'TENANTID',
u'extra_dhcp_opts': [],
u'binding:vif_type': u'ovs',
u'device_owner': u'network:dhcp',
u'binding:capabilities': {u'port_filter': True},
u'mac_address': u'fa:16:3e:e0:d4:63',
u'fixed_ips': [
{
u'subnet_id': u'SUBNET1',
u'ip_address': u'10.0.0.3'
}
],
u'id': u'PORT1',
u'security_groups': [],
u'device_id': u''
}
PORT2_RESPONSE = {
u'status': u'DOWN',
u'binding:host_id': u'',
u'name': u'',
u'allowed_address_pairs': [],
u'admin_state_up': True,
u'network_id': u'NETWORK2',
u'tenant_id': u'TENANTID',
u'extra_dhcp_opts': [],
u'binding:vif_type': u'unbound',
u'device_owner': u'',
u'binding:capabilities': {u'port_filter': False},
u'mac_address': u'00:09:7b:3e:18:ca',
u'fixed_ips': [
{
u'subnet_id': u'SUBNET2',
u'ip_address': u'192.168.27.3'
}
],
u'id': u'PORT2',
u'security_groups': [u'SECGRP'],
u'device_id': u''
}
SUBNET1_RESPONSE = {
u'name': u'private-subnet',
u'enable_dhcp': True,
u'network_id': u'NETWORK1',
u'tenant_id': u'TENANTID',
u'dns_nameservers': [],
u'allocation_pools': [
{
u'start': u'10.0.0.2',
u'end': u'10.0.0.254'
}
],
u'host_routes': [],
u'ip_version': 4,
u'gateway_ip': u'10.0.0.1',
u'cidr': u'10.0.0.0/24',
u'id': u'SUBNET1'
}
SUBNET2_RESPONSE = {
u'name': u'public-subnet',
u'enable_dhcp': False,
u'network_id': u'NETWORK2',
u'tenant_id': u'TENANTID',
u'dns_nameservers': [],
u'allocation_pools': [
{
u'start': u'192.168.27.1',
u'end': u'192.168.27.1'
},
{
u'start': u'192.168.27.3',
u'end': u'192.168.27.254'
}
],
u'host_routes': [],
u'ip_version': 4,
u'gateway_ip': u'192.168.27.2',
u'cidr': u'192.168.27.0/24',
u'id': u'SUBNET2'
}
SERIALIZED_NETWORK1 = collections.OrderedDict([
('id', u'NETWORK1'),
('name', u'private'),
('status', u'ACTIVE'),
('subnets', [
collections.OrderedDict([
('id', u'SUBNET1'),
('name', u'private-subnet'),
('ip_version', 4),
('gateway_ip', u'10.0.0.1'),
('cidr', u'10.0.0.0/24'),
('enable_dhcp', True)
])
])
])
SERIALIZED_NETWORK2 = collections.OrderedDict([
('id', u'NETWORK2'),
('name', u'public'),
('status', u'ACTIVE'),
('subnets', [
collections.OrderedDict([
('id', u'SUBNET2'),
('name', u'public-subnet'),
('ip_version', 4),
('gateway_ip', u'192.168.27.2'),
('cidr', u'192.168.27.0/24'),
('enable_dhcp', False)
])
])
])
SERIALIZED_PORT1 = collections.OrderedDict([
('id', u'PORT1'),
('name', u''),
('status', u'ACTIVE'),
('mac_address', u'fa:16:3e:e0:d4:63'),
('fixed_ips', [
{
u'subnet_id': u'SUBNET1',
u'ip_address': u'10.0.0.3'
}
]),
('network', SERIALIZED_NETWORK1)
])
| 32.221951 | 78 | 0.626448 |
0ab7ab472dc6bde156894c22490a3de97781b2d7 | 4,508 | py | Python | typeidea/blog/views.py | Phoenix-sy/typeidea | e913218872c7f4e9afc290eb42b4ca8c8e4523be | [
"MIT"
] | null | null | null | typeidea/blog/views.py | Phoenix-sy/typeidea | e913218872c7f4e9afc290eb42b4ca8c8e4523be | [
"MIT"
] | 4 | 2020-06-06T01:37:34.000Z | 2021-09-08T01:49:56.000Z | typeidea/blog/views.py | Phoenix-sy/typeidea | e913218872c7f4e9afc290eb42b4ca8c8e4523be | [
"MIT"
] | null | null | null | from datetime import date
from django.core.cache import cache
from django.db.models import Q, F
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
#from silk.profiling.profiler import silk_profile
from config.models import SideBar
from .models import Post, Tag, Category
from comment.models import Comment
'''
def post_list(request, category_id=None, tag_id=None):
tag = None
category = None
if tag_id:
post_list, tag = Post.get_by_tag(tag_id)
elif category_id:
post_list, category=Post.get_by_category(category_id)
else:
post_list = Post.latest_posts()
context = {
'category': category,
'tag': tag,
'post_list': post_list,
'sidebars': SideBar.get_all(),
}
context.update(Category.get_navs())
return render(request, 'blog/list.html', context=context)
def post_detail(request, post_id=None):
try:
post = Post.objects.get(id=post_id)
except Post.DoesNotExist:
raise Http404('Post does not exist!')
context={
'post': post,
'sidebars': SideBar.get_all(),
}
context.update(Category.get_navs())
return render(request, 'blog/detail.html', context=context)
'''
| 24.367568 | 72 | 0.717613 |
0ab8196f812a9bd1c5cff6d84c43cd3a82467a55 | 618 | py | Python | VMI/VMItest.py | thomasbarillot/DAQ | 20126655f74194757d25380680af9429ff27784e | [
"MIT"
] | 1 | 2017-04-25T10:56:01.000Z | 2017-04-25T10:56:01.000Z | VMI/VMItest.py | thomasbarillot/DAQ | 20126655f74194757d25380680af9429ff27784e | [
"MIT"
] | null | null | null | VMI/VMItest.py | thomasbarillot/DAQ | 20126655f74194757d25380680af9429ff27784e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 7 11:38:18 2016
@author: thomasbarillot
VMI control
"""
from ctypes import cdll
#slib="VMIcrtl_ext.dll"
#hlib=cdll('VMIcrtl.dll')
import VMIcrtl_ext
test=VMIcrtl_ext.VMIcrtl()
#%%
print test.GetFilename()
#%%
test.setFilename('20161115_1841.dat')
print test.GetFilename()
#%%
test.StartAcquisitionPrev()
#%%
test.StopAcquisition()
#%%
img=test.RecallImagePrev()
#%%
import numpy as np
print np.shape(img)
a=np.array(img)
print a
#%%
from matplotlib import pyplot as plt
#%%
b=np.reshape(a,[400,400])
print b
plt.figure()
plt.pcolor(np.reshape(a,[400,400])) | 12.875 | 37 | 0.699029 |
0ab878278314d67f6d0be9f6568f133ce9e1ee76 | 8,119 | py | Python | var/spack/repos/builtin/packages/openssl/package.py | vitodb/spack | b9ab1de4c5f7b21d9f9cb88b7251820a48e82d27 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/openssl/package.py | vitodb/spack | b9ab1de4c5f7b21d9f9cb88b7251820a48e82d27 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2021-01-11T09:16:43.000Z | 2021-01-12T20:07:23.000Z | var/spack/repos/builtin/packages/openssl/package.py | vitodb/spack | b9ab1de4c5f7b21d9f9cb88b7251820a48e82d27 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2021-01-06T18:58:26.000Z | 2021-01-06T18:58:26.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
from spack import *
import spack.architecture
import os
| 51.713376 | 96 | 0.711787 |
0ab9be78769ca53a9456cd93a3fd3ab2a85a0c35 | 4,799 | py | Python | vispy/util/profiler.py | izaid/vispy | 402cf95bfef88d70c9c45bb27c532ed72944e14a | [
"BSD-3-Clause"
] | null | null | null | vispy/util/profiler.py | izaid/vispy | 402cf95bfef88d70c9c45bb27c532ed72944e14a | [
"BSD-3-Clause"
] | null | null | null | vispy/util/profiler.py | izaid/vispy | 402cf95bfef88d70c9c45bb27c532ed72944e14a | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Adapted from PyQtGraph
import sys
from . import ptime
from .. import config
| 34.52518 | 79 | 0.583663 |
0aba3f90d5e6185589e45a9a8d8d372bccb752c2 | 764 | py | Python | tests/test_processor.py | vijithv/djangosaml2idp | 8a238063da55bf4823bdc2192168171767c4e056 | [
"Apache-2.0"
] | 1 | 2021-11-03T17:53:29.000Z | 2021-11-03T17:53:29.000Z | tests/test_processor.py | vijithv/djangosaml2idp | 8a238063da55bf4823bdc2192168171767c4e056 | [
"Apache-2.0"
] | null | null | null | tests/test_processor.py | vijithv/djangosaml2idp | 8a238063da55bf4823bdc2192168171767c4e056 | [
"Apache-2.0"
] | 1 | 2020-04-23T03:52:10.000Z | 2020-04-23T03:52:10.000Z | from django.contrib.auth import get_user_model
from djangosaml2idp.processors import BaseProcessor
User = get_user_model()
| 29.384615 | 97 | 0.722513 |
0abaca3d1ed91ca49de4c9b160592c473142f544 | 1,840 | py | Python | com/ds/SingleLinkedList.py | sasikrishna/python-programs | 937002f37c86efc5c876b37c7b42634ca629fffc | [
"MIT"
] | null | null | null | com/ds/SingleLinkedList.py | sasikrishna/python-programs | 937002f37c86efc5c876b37c7b42634ca629fffc | [
"MIT"
] | null | null | null | com/ds/SingleLinkedList.py | sasikrishna/python-programs | 937002f37c86efc5c876b37c7b42634ca629fffc | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
list = SingleLinkedList();
list.add(5)
list.add(4)
list.add(12)
list.add(13)
list.add(19)
list.print_list();
print("List contains element 4", list.contains(4))
print("List contains element 6", list.contains(6))
print("Removing element 13", list.remove(13))
list.print_list();
print("List contains element 13", list.contains(13))
| 23.896104 | 56 | 0.563043 |
0abb04a5bd64547bc5fd647c86d2afb7977fd604 | 55 | py | Python | src/data_setup/__init__.py | data-stories/chart-experiment | f4d7c86c32edca8bcb474cce5f6312138acf5cc9 | [
"MIT"
] | null | null | null | src/data_setup/__init__.py | data-stories/chart-experiment | f4d7c86c32edca8bcb474cce5f6312138acf5cc9 | [
"MIT"
] | 1 | 2021-08-07T07:39:17.000Z | 2021-08-07T07:39:17.000Z | src/data_setup/__init__.py | data-stories/chart-experiment | f4d7c86c32edca8bcb474cce5f6312138acf5cc9 | [
"MIT"
] | 1 | 2021-08-06T16:27:00.000Z | 2021-08-06T16:27:00.000Z | __all__ = ["data_setup", "chart_params", "base_params"] | 55 | 55 | 0.727273 |
0abb3c732259b19c9e708a20325a84c61a393244 | 1,851 | py | Python | src/aiocomcrawl/models.py | rudaporto/aiocomcrawl | 9f76097d9f82c5790f968d26a6f1c3908084569b | [
"Apache-2.0"
] | null | null | null | src/aiocomcrawl/models.py | rudaporto/aiocomcrawl | 9f76097d9f82c5790f968d26a6f1c3908084569b | [
"Apache-2.0"
] | null | null | null | src/aiocomcrawl/models.py | rudaporto/aiocomcrawl | 9f76097d9f82c5790f968d26a6f1c3908084569b | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from typing import Any, List, Optional, Union
from pydantic import BaseModel, Field, HttpUrl, validator
from pydantic.dataclasses import dataclass
class Result(BaseModel):
url_key: str = Field(alias="urlkey")
timestamp: datetime
url: str
mime: str
mime_detected: str = Field(alias="mime-detected")
status: int
digest: str
length: int
offset: int
filename: str
languages: Optional[str]
encoding: Optional[str]
index_id: Optional[str]
body: Optional[ResultBody]
meta: Optional[ResultMeta]
| 23.730769 | 81 | 0.678012 |
0abb4de3626dcbaf10f7a01c7d732b38a10d112a | 3,453 | py | Python | fs/error_tools.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | fs/error_tools.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | fs/error_tools.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | """Tools for managing OS errors.
"""
from __future__ import print_function
from __future__ import unicode_literals
import errno
from contextlib import contextmanager
import sys
import platform
from . import errors
from six import reraise
_WINDOWS_PLATFORM = platform.system() == 'Windows'
# Stops linter complaining about invalid class name
convert_os_errors = _ConvertOSErrors
| 31.390909 | 78 | 0.650449 |
0abbc3e1d5afde9470d734d62bcb0511ac93cadd | 5,390 | py | Python | samples/samplenetconf/demos/vr_demo3.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | [
"BSD-3-Clause"
] | 1 | 2017-08-22T14:17:10.000Z | 2017-08-22T14:17:10.000Z | samples/samplenetconf/demos/vr_demo3.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | [
"BSD-3-Clause"
] | 1 | 2021-03-26T00:47:22.000Z | 2021-03-26T00:47:22.000Z | samples/samplenetconf/demos/vr_demo3.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pysdn.controller.controller import Controller
from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600
from pysdn.common.status import STATUS
from pysdn.common.utils import load_dict_from_file
if __name__ == "__main__":
vr_demo_3()
| 34.113924 | 78 | 0.62115 |
0abcb370c0d40bd870443ed0b022026c144555c8 | 3,829 | py | Python | python/index.py | stijnvanhulle/EscapeGame | ae3e35334d64394a0f696149bfd56c1fd7a97681 | [
"MIT"
] | 1 | 2020-08-16T02:52:06.000Z | 2020-08-16T02:52:06.000Z | python/index.py | stijnvanhulle/EscapeGame | ae3e35334d64394a0f696149bfd56c1fd7a97681 | [
"MIT"
] | 1 | 2021-10-18T18:39:08.000Z | 2021-10-18T18:39:08.000Z | python/index.py | stijnvanhulle/EscapeGame | ae3e35334d64394a0f696149bfd56c1fd7a97681 | [
"MIT"
] | null | null | null | # @Author: Stijn Van Hulle <stijnvanhulle>
# @Date: 2016-11-28T13:51:38+01:00
# @Email: [email protected]
# @Last modified by: stijnvanhulle
# @Last modified time: 2016-12-20T12:51:07+01:00
# @License: stijnvanhulle.be
#!/usr/bin/env python
import time
import datetime
import math
import sys
import json
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import lib.faceDetection as faceDetection
import lib.levelCalculation as levelCalculation
MQTT_BROKER="localhost"
client = mqtt.Client()
#classes
if __name__ == '__main__':
try:
if len(sys.argv)>1:
MQTT_BROKER=sys.argv[1]
else:
input_text = input("Ip of MQTT-broker: ")
if input_text:
MQTT_BROKER=input_text
#executor = ProcessPoolExecutor(2)
#loop = trollius.get_event_loop()
#_main = trollius.async(loop.run_in_executor(executor, main))
main()
except (TypeError) as ex:
error="Error: " + str(ex)
#print(error)
except (KeyboardInterrupt):
        print("\nIOT has been shut down\n")
        sys.exit(0)
    except (SystemExit):
        print("\nIOT was forcibly shut down\n")
| 25.357616 | 105 | 0.71298 |
0abd370b6b3c7d06f851a685777b6e689527ccf7 | 8,184 | py | Python | peps/converters.py | idjaw/pythondotorg | 8e4babbc7ad15ed52b4f66fdd4ab43c2dd3bd649 | [
"Apache-2.0"
] | null | null | null | peps/converters.py | idjaw/pythondotorg | 8e4babbc7ad15ed52b4f66fdd4ab43c2dd3bd649 | [
"Apache-2.0"
] | 2 | 2022-01-13T03:57:42.000Z | 2022-03-12T01:01:40.000Z | peps/converters.py | idjaw/pythondotorg | 8e4babbc7ad15ed52b4f66fdd4ab43c2dd3bd649 | [
"Apache-2.0"
] | null | null | null | import re
import os
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from pages.models import Page, Image
PEP_TEMPLATE = 'pages/pep-page.html'
pep_url = lambda num: 'dev/peps/pep-{}/'.format(num)
def check_paths():
""" Checks to ensure our PEP_REPO_PATH is setup correctly """
if not hasattr(settings, 'PEP_REPO_PATH'):
raise ImproperlyConfigured("No PEP_REPO_PATH in settings")
if not os.path.exists(settings.PEP_REPO_PATH):
raise ImproperlyConfigured("PEP_REPO_PATH in settings does not exist")
def convert_pep0():
"""
Take existing generated pep-0000.html and convert to something suitable
for a Python.org Page returns the core body HTML necessary only
"""
check_paths()
pep0_path = os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html')
pep0_content = open(pep0_path).read()
soup = BeautifulSoup(pep0_content)
body_children = list(soup.body.children)
# Grab header and PEP body
header = body_children[3]
pep_content = body_children[7]
# Fix PEP links
body_links = pep_content.find_all("a")
pep_href_re = re.compile(r'pep-(\d+)\.html')
for b in body_links:
m = pep_href_re.search(b.attrs['href'])
# Skip anything not matching 'pep-XXXX.html'
if not m:
continue
b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))
# Remove Version from header
header_rows = header.find_all('th')
for t in header_rows:
if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
t.parent.extract()
return ''.join([header.prettify(), pep_content.prettify()])
def get_pep0_page(commit=True):
"""
Using convert_pep0 above, create a CMS ready pep0 page and return it
pep0 is used as the directory index, but it's also an actual pep, so we
return both Page objects.
"""
pep0_content = convert_pep0()
pep0_page, _ = Page.objects.get_or_create(path='dev/peps/')
pep0000_page, _ = Page.objects.get_or_create(path='dev/peps/pep-0000/')
for page in [pep0_page, pep0000_page]:
page.content = pep0_content
page.content_markup_type = 'html'
page.title = "PEP 0 -- Index of Python Enhancement Proposals (PEPs)"
page.template_name = PEP_TEMPLATE
if commit:
page.save()
return pep0_page, pep0000_page
def fix_headers(soup, data):
""" Remove empty or unwanted headers and find our title """
header_rows = soup.find_all('th')
for t in header_rows:
if 'Version:' in t.text:
if t.next_sibling.text == '$Revision$':
t.parent.extract()
if t.next_sibling.text == '':
t.parent.extract()
if 'Last-Modified:' in t.text:
if '$Date$'in t.next_sibling.text:
t.parent.extract()
if t.next_sibling.text == '':
t.parent.extract()
if t.text == 'Title:':
data['title'] = t.next_sibling.text
if t.text == 'Content-Type:':
t.parent.extract()
if 'Version:' in t.text and 'N/A' in t.next_sibling.text:
t.parent.extract()
return soup, data
def convert_pep_page(pep_number, content):
"""
Handle different formats that pep2html.py outputs
"""
check_paths()
data = {
'title': None,
}
if '<html>' in content:
soup = BeautifulSoup(content)
data['title'] = soup.title.text
if not re.search(r'PEP \d+', data['title']):
data['title'] = 'PEP {} -- {}'.format(
pep_number,
soup.title.text,
)
header = soup.body.find('div', class_="header")
header, data = fix_headers(header, data)
data['header'] = header.prettify()
main_content = soup.body.find('div', class_="content")
data['main_content'] = main_content.prettify()
data['content'] = ''.join([
data['header'],
data['main_content']
])
else:
soup = BeautifulSoup(content)
soup, data = fix_headers(soup, data)
if not data['title']:
data['title'] = "PEP {} -- ".format(pep_number)
else:
if not re.search(r'PEP \d+', data['title']):
data['title'] = "PEP {} -- {}".format(
pep_number,
data['title'],
)
data['content'] = soup.prettify()
# Fix PEP links
pep_content = BeautifulSoup(data['content'])
body_links = pep_content.find_all("a")
pep_href_re = re.compile(r'pep-(\d+)\.html')
for b in body_links:
m = pep_href_re.search(b.attrs['href'])
# Skip anything not matching 'pep-XXXX.html'
if not m:
continue
b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1))
data['content'] = pep_content.prettify()
hg_link = "https://hg.python.org/peps/file/tip/pep-{0}.txt".format(pep_number)
data['content'] += """Source: <a href="{0}">{0}</a>""".format(hg_link)
return data
def get_pep_page(pep_number, commit=True):
"""
Given a pep_number retrieve original PEP source text, rst, or html.
Get or create the associated Page and return it
"""
pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number))
if not os.path.exists(pep_path):
print("PEP Path '{}' does not exist, skipping".format(pep_path))
pep_content = convert_pep_page(pep_number, open(pep_path).read())
pep_page, _ = Page.objects.get_or_create(path=pep_url(pep_number))
# Remove leading zeros from PEP number for display purposes
pep_number_string = str(pep_number)
pep_number_string = re.sub(r'^0+', '', pep_number_string)
pep_page.title = pep_content['title']
pep_page.content = pep_content['content']
pep_page.content_markup_type = 'html'
pep_page.template_name = PEP_TEMPLATE
if commit:
pep_page.save()
return pep_page
| 29.228571 | 85 | 0.615225 |
0abda1cb427ed8f070a7f02e638f35191861013c | 68 | py | Python | venv/Lib/site-packages/toolz/sandbox/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 3,749 | 2015-01-01T06:53:12.000Z | 2022-03-31T13:36:10.000Z | venv/Lib/site-packages/toolz/sandbox/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 276 | 2015-01-01T15:34:41.000Z | 2022-03-17T02:16:35.000Z | venv/Lib/site-packages/toolz/sandbox/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 256 | 2015-01-18T04:29:48.000Z | 2022-03-31T00:10:13.000Z | from .core import EqualityHashKey, unzip
from .parallel import fold
| 22.666667 | 40 | 0.823529 |
0abdfc5e117d17fbbf96aa6e5e9c1b706bacee2c | 95 | py | Python | interface/app/__init__.py | caglorithm/accel | 7fe5c13ea9559565c599633bdb3318c8fbc57088 | [
"MIT"
] | 31 | 2019-12-07T01:27:19.000Z | 2021-12-19T08:12:18.000Z | interface/app/__init__.py | caglorithm/accel | 7fe5c13ea9559565c599633bdb3318c8fbc57088 | [
"MIT"
] | null | null | null | interface/app/__init__.py | caglorithm/accel | 7fe5c13ea9559565c599633bdb3318c8fbc57088 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__, static_folder='static')
from app import routes
| 15.833333 | 45 | 0.778947 |
0abe087af168de7f10f0e7fc51d33adc2b129507 | 2,421 | py | Python | implementations/python3/tests/CAPDU.py | sebastien-riou/SATL | b95d0e784d2e8e1384381d4d5b8b448d3d1798cf | [
"Apache-2.0"
] | 4 | 2020-05-13T10:13:55.000Z | 2021-10-20T04:43:07.000Z | implementations/python3/tests/CAPDU.py | TiempoSecure/SATL | b95d0e784d2e8e1384381d4d5b8b448d3d1798cf | [
"Apache-2.0"
] | 4 | 2020-07-22T16:06:31.000Z | 2021-07-25T19:51:41.000Z | implementations/python3/tests/CAPDU.py | TiempoSecure/SATL | b95d0e784d2e8e1384381d4d5b8b448d3d1798cf | [
"Apache-2.0"
] | 2 | 2019-05-12T21:15:00.000Z | 2020-09-23T09:05:24.000Z | import os
import pysatl
from pysatl import CAPDU
if __name__ == "__main__":
#check __repr__
expected = "pysatl.CAPDU.from_hexstr('00112233015502')"
capdu=None
exec("capdu="+expected)
assert(expected==repr(capdu))
#check well formed inputs
check("00112233", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("00 11 22 33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("0x00,0x11,0x22,0x33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
#check we tolerate less well formed inputs
check("00-11,22_33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("""0x00 0x11 0x22
0x33""", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("1 2 304", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04))
LC_cases = [0,1,2,254,255,256,257,65534,65535]
LE_cases = LC_cases + [65536]
for LC in LC_cases:
for LE in LE_cases:
print(LC,LE)
check(*gencase(LC=LC, LE=LE))
| 32.28 | 114 | 0.53449 |
0abf250849dcb075b82b1ca50e27cc3adefcc742 | 3,993 | py | Python | src/mgls_bootstrapping.py | rosich/mgls | 64c924f59adba2dddf44bb70a84868173f0b7120 | [
"MIT"
] | null | null | null | src/mgls_bootstrapping.py | rosich/mgls | 64c924f59adba2dddf44bb70a84868173f0b7120 | [
"MIT"
] | null | null | null | src/mgls_bootstrapping.py | rosich/mgls | 64c924f59adba2dddf44bb70a84868173f0b7120 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from math import sin, cos, tan, atan, pi, acos, sqrt, exp, log10
import sys, os
import copy
import random
import numpy as np
import multiprocessing as mp
import ConfigParser
sys.path.append('./bin')
import mGLS, mMGLS
sys.path.append('./src')
from EnvGlobals import Globals
import mgls_io
import mgls_mc
from mgls_lib import *
#definitions and constants
to_radians = pi/180.0
to_deg = 1.0/to_radians
#-------------------------
def _gls_instance_Ndim_bootstrapping(n_runs):
"""executes n_runs instances of MGLS for with previous data shuffle
"""
cpu_periodogram = list()
for iter in range(n_runs):
"""
#shuffle RV's and their errors. Repetition is not allowed
comb_rv_err = zip(Globals.rv, Globals.rv_err)
random.shuffle(comb_rv_err)
Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err)
"""
#allowing repetition
rv = [0.0]*len(Globals.time)
rv_err = [0.0]*len(Globals.time)
for i in range(len(Globals.time)):
index = int(random.uniform(0,len(Globals.time)))
rv[i] = Globals.rv[index]
rv_err[i] = Globals.rv_err[index]
Globals.rv = rv
Globals.rv_err = rv_err
opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000)
pwr_opt, fitting_coeffs, A = mgls(opt_state)
cpu_periodogram.append(pwr_opt) #save the best period determination (highest power)
return cpu_periodogram
def fap(bootstrapping_stats, pwr):
"""returns FAP for a given pwr. i.e. how many realizations overcome
a given power, over unit.
"""
return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats)
def fap_levels(bootstrapping_stats):
"""determines which power a FAP of 1, 0.1, 0.01 % is reached
"""
FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to compute in %
n_bs = len(bootstrapping_stats)
    #sort bootstrapping_stats vector in ascending order
sorted_pwr = sorted(bootstrapping_stats)
return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))]
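# Illustrative use (numbers are synthetic): if bootstrapping_stats holds the
# best powers found in shuffled-data runs, then
#   fap(bootstrapping_stats, pwr=12.0)  -> fraction of runs with power > 12.0
#   fap_levels(bootstrapping_stats)     -> powers at the 99, 99.9, 99.99 and 99.999
#                                          percentiles, i.e. the thresholds for
#                                          FAPs of 1, 0.1, 0.01 and 0.001 %.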
def parallel_Mdim_bootstrapping(n_bootstrapping):
"""
"""
n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]
pool = mp.Pool(Globals.ncpus) #ncpus available
#run parallell execution
try:
out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001)
pool.terminate()
except KeyboardInterrupt:
pool.terminate()
sys.exit()
"""
except ZeroDivisionError:
print "Error: Zero division error. Restarted parallel bootstapping"
"""
#join the output bunches
out_spectra = list()
for cpu in range(len(n_runs)):
out_spectra.extend(out[cpu])
bootstrapping_stats = list()
for j in range(len(out_spectra)):
bootstrapping_stats.append(out_spectra[j])
return bootstrapping_stats
def parallel_bootstrapping(n_bootstrapping):
"""
"""
n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]
pool = mp.Pool(Globals.ncpus) #ncpus available
#run parallell execution
try:
out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001)
pool.terminate()
except KeyboardInterrupt:
pool.terminate()
sys.exit()
#join the output bunches
out_spectra = list()
for cpu in range(len(n_runs)):
out_spectra.extend(out[cpu])
bootstrapping_stats = list()
for j in range(len(out_spectra)):
bootstrapping_stats.append(out_spectra[j])
return bootstrapping_stats
def Mdim_bootstrapping(max_pow):
"""
"""
#n_bootstrapping = 500 #iterations
bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping)
print "\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%"
print "FAP Levels:", fap_levels(bootstrapping_stats)
print "Total bootstapping samples: ", len(bootstrapping_stats)
return bootstrapping_stats
| 31.690476 | 91 | 0.672176 |
0abf69ab54ec15326e13cf19d070cb3b005d83d2 | 495 | py | Python | mgmt/src/constants.py | pcaruana/sombrio | 3b669fc83e0227a69b673b5555d88e15b55c397c | [
"MIT"
] | null | null | null | mgmt/src/constants.py | pcaruana/sombrio | 3b669fc83e0227a69b673b5555d88e15b55c397c | [
"MIT"
] | null | null | null | mgmt/src/constants.py | pcaruana/sombrio | 3b669fc83e0227a69b673b5555d88e15b55c397c | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""
constants.py - Contains all constants used by the device manager
Author:
- Pablo Caruana (pablo dot caruana at gmail dot com)
Date: 12/3/2016
"""
number_of_rows = 3 # total number rows of Index Servers
number_of_links = 5 # number of links to be sent to Crawler
number_of_chunks = 5 # number of chunks to be sent to Index Builder
number_of_comps = 10 # number of components managed by each watchdog
| 38.076923 | 79 | 0.656566 |
0abfe16c350b956230d3407edf8eac65ac07365b | 1,015 | py | Python | XDoG/XDoG.py | STomoya/sketchify | 93c068042f02172505457cc15cb0bef673666be3 | [
"MIT"
] | null | null | null | XDoG/XDoG.py | STomoya/sketchify | 93c068042f02172505457cc15cb0bef673666be3 | [
"MIT"
] | null | null | null | XDoG/XDoG.py | STomoya/sketchify | 93c068042f02172505457cc15cb0bef673666be3 | [
"MIT"
] | null | null | null |
import cv2
import numpy as np
# This config was chosen by the author;
# modify it if the output is not as desired.
XDoG_config = dict(
size=0,
sigma=0.6,
eps=-15,
phi=10e8,
k=2.5,
gamma=0.97
)
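# For reference, these parameters correspond to the usual XDoG formulation
# (a sketch of the standard method, not necessarily the exact code omitted
# from this excerpt):
#   S(x)   = G_sigma(x) - gamma * G_(k*sigma)(x)          # scaled difference of Gaussians
#   out(x) = 1                             if S(x) >= eps
#            1 + tanh(phi * (S(x) - eps))  otherwise
# 'size' would be the Gaussian kernel size (0 lets OpenCV derive it from sigma).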
if __name__ == "__main__":
gen_xdog_image('sample.jpg', 'dog.jpg') | 26.025641 | 60 | 0.613793 |
0ac1668c9f200fa1e8cd7c054395a35fadf64190 | 8,070 | py | Python | lm/validate.py | ericlin8545/grover | 3ac6e506f2e1a859d98cc2c3fb57ba251be31484 | [
"Apache-2.0"
] | 864 | 2019-06-18T18:53:58.000Z | 2022-03-04T22:36:52.000Z | lm/validate.py | ericlin8545/grover | 3ac6e506f2e1a859d98cc2c3fb57ba251be31484 | [
"Apache-2.0"
] | 62 | 2019-06-20T19:37:39.000Z | 2022-02-10T00:14:49.000Z | lm/validate.py | ericlin8545/grover | 3ac6e506f2e1a859d98cc2c3fb57ba251be31484 | [
"Apache-2.0"
] | 224 | 2019-06-18T18:45:56.000Z | 2022-03-29T17:46:30.000Z | # Original work Copyright 2018 The Google AI Language Team Authors.
# Modified work Copyright 2019 Rowan Zellers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from lm.modeling import model_fn_builder, GroverConfig
import tensorflow as tf
from lm.dataloader import input_fn_builder
import numpy as np
import tempfile
import h5py
from google.cloud import storage
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"config_file", 'configs/base.json',
"The config json file corresponding to the pre-trained news model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string(
"validation_name", 'preds.h5',
"Name to use")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained model).")
flags.DEFINE_integer(
"max_seq_length", 1024,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("batch_size", 32, "Batch size used for eval")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
# This is a handy little utility so that we can save the perplexities to TPU
def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1):
"""
:param array: Single dimension array
:param target: target to search for
:param return_first_match: If true, return the first index that matches, otherwise, return the last one
:param default_value: Index to return if there was no match
:return: index of the first match, or -1 if nothing
"""
assert array.ndim == 1
matching_inds = np.where(array == target)[0]
if len(matching_inds) > 0:
if return_first_match:
return int(matching_inds[0])
else:
return int(matching_inds[-1])
return default_value
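# Example: with array = np.array([3, 7, 7, 2]):
#   ind_where(array, 7)                            -> 1   (first match)
#   ind_where(array, 7, return_first_match=False)  -> 2   (last match)
#   ind_where(array, 9)                            -> -1  (default_value)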
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| 37.534884 | 108 | 0.688352 |
0ac16994f053988d4add08873e022a2c2ce12964 | 5,055 | py | Python | robo/fmin/entropy_search.py | fuhuifang/RoBo | 036bbaa0e59032577e2611d8ba304384b397c7f6 | [
"BSD-3-Clause"
] | null | null | null | robo/fmin/entropy_search.py | fuhuifang/RoBo | 036bbaa0e59032577e2611d8ba304384b397c7f6 | [
"BSD-3-Clause"
] | null | null | null | robo/fmin/entropy_search.py | fuhuifang/RoBo | 036bbaa0e59032577e2611d8ba304384b397c7f6 | [
"BSD-3-Clause"
] | null | null | null | import logging
import george
import numpy as np
from robo.priors.default_priors import DefaultPrior
from robo.models.gaussian_process import GaussianProcess
from robo.models.gaussian_process_mcmc import GaussianProcessMCMC
from robo.maximizers.random_sampling import RandomSampling
from robo.maximizers.scipy_optimizer import SciPyOptimizer
from robo.maximizers.differential_evolution import DifferentialEvolution
from robo.solver.bayesian_optimization import BayesianOptimization
from robo.acquisition_functions.information_gain import InformationGain
from robo.acquisition_functions.ei import EI
from robo.acquisition_functions.marginalization import MarginalizationGPMCMC
from robo.initial_design import init_latin_hypercube_sampling
logger = logging.getLogger(__name__)
def entropy_search(objective_function, lower, upper, num_iterations=30,
maximizer="random", model="gp_mcmc",
n_init=3, output_path=None, rng=None):
"""
    Entropy search for global black box optimization problems. This is a reimplementation of the entropy search
    algorithm by Hennig and Schuler [1].
[1] Entropy search for information-efficient global optimization.
P. Hennig and C. Schuler.
JMLR, (1), 2012.
Parameters
----------
objective_function: function
The objective function that is minimized. This function gets a numpy array (D,) as input and returns
the function value (scalar)
lower: np.ndarray (D,)
The lower bound of the search space
upper: np.ndarray (D,)
The upper bound of the search space
num_iterations: int
The number of iterations (initial design + BO)
maximizer: {"random", "scipy", "differential_evolution"}
Defines how the acquisition function is maximized.
model: {"gp", "gp_mcmc"}
The model for the objective function.
n_init: int
Number of points for the initial design. Make sure that it is <= num_iterations.
output_path: string
Specifies the path where the intermediate output after each iteration will be saved.
If None no output will be saved to disk.
rng: numpy.random.RandomState
Random number generator
Returns
-------
dict with all results
"""
assert upper.shape[0] == lower.shape[0], "Dimension miss match"
assert np.all(lower < upper), "Lower bound >= upper bound"
assert n_init <= num_iterations, "Number of initial design point has to be <= than the number of iterations"
if rng is None:
rng = np.random.RandomState(np.random.randint(0, 10000))
cov_amp = 2
n_dims = lower.shape[0]
initial_ls = np.ones([n_dims])
exp_kernel = george.kernels.Matern52Kernel(initial_ls,
ndim=n_dims)
kernel = cov_amp * exp_kernel
prior = DefaultPrior(len(kernel) + 1)
n_hypers = 3 * len(kernel)
if n_hypers % 2 == 1:
n_hypers += 1
if model == "gp":
gp = GaussianProcess(kernel, prior=prior, rng=rng,
normalize_output=False, normalize_input=True,
lower=lower, upper=upper)
elif model == "gp_mcmc":
gp = GaussianProcessMCMC(kernel, prior=prior,
n_hypers=n_hypers,
chain_length=200,
burnin_steps=100,
normalize_input=True,
normalize_output=False,
rng=rng, lower=lower, upper=upper)
else:
print("ERROR: %s is not a valid model!" % model)
return
a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI)
if model == "gp":
acquisition_func = a
elif model == "gp_mcmc":
acquisition_func = MarginalizationGPMCMC(a)
if maximizer == "random":
max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
elif maximizer == "scipy":
max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
elif maximizer == "differential_evolution":
max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)
else:
print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer)
return
bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func,
initial_design=init_latin_hypercube_sampling,
initial_points=n_init, rng=rng, output_path=output_path)
x_best, f_min = bo.run(num_iterations)
results = dict()
results["x_opt"] = x_best
results["f_opt"] = f_min
results["incumbents"] = [inc for inc in bo.incumbents]
results["incumbent_values"] = [val for val in bo.incumbents_values]
results["runtime"] = bo.runtime
results["overhead"] = bo.time_overhead
results["X"] = [x.tolist() for x in bo.X]
results["y"] = [y for y in bo.y]
return results
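# Minimal usage sketch (hypothetical objective and a tiny budget; not part of
# the original module):
#   def sphere(x):
#       return float(np.sum(x ** 2))
#   res = entropy_search(sphere, lower=np.zeros(2), upper=np.ones(2),
#                        num_iterations=10, n_init=3)
#   print(res["x_opt"], res["f_opt"])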
| 39.492188 | 112 | 0.656775 |
0ac18453ebf1417fb6591ada4674116fa981b20f | 402 | py | Python | biserici_inlemnite/app/migrations/0096_bisericapage_datare_an.py | ck-tm/biserici-inlemnite | c9d12127b92f25d3ab2fcc7b4c386419fe308a4e | [
"MIT"
] | null | null | null | biserici_inlemnite/app/migrations/0096_bisericapage_datare_an.py | ck-tm/biserici-inlemnite | c9d12127b92f25d3ab2fcc7b4c386419fe308a4e | [
"MIT"
] | null | null | null | biserici_inlemnite/app/migrations/0096_bisericapage_datare_an.py | ck-tm/biserici-inlemnite | c9d12127b92f25d3ab2fcc7b4c386419fe308a4e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.13 on 2021-10-29 11:07
from django.db import migrations, models
| 21.157895 | 61 | 0.606965 |
0ac20eefa93e74fa6f679df0410321e3088f3827 | 664 | py | Python | services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py | bcgov/mds | 6c427a66a5edb4196222607291adef8fd6677038 | [
"Apache-2.0"
] | 25 | 2018-07-09T19:04:37.000Z | 2022-03-15T17:27:10.000Z | services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py | areyeslo/mds | e8c38e593e09b78e2a57009c0d003d6c4bfa32e6 | [
"Apache-2.0"
] | 983 | 2018-04-25T20:08:07.000Z | 2022-03-31T21:45:20.000Z | services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py | areyeslo/mds | e8c38e593e09b78e2a57009c0d003d6c4bfa32e6 | [
"Apache-2.0"
] | 58 | 2018-05-15T22:35:50.000Z | 2021-11-29T19:40:52.000Z | from app.api.utils.models_mixins import Base
from app.extensions import db
| 36.888889 | 97 | 0.724398 |
0ac2127dd527328224d7a0dde62602b62da1bdb4 | 678 | py | Python | lgtv_rs232/commands/remote_control/remote_control_lock.py | davo22/lgtv_rs232 | 40562cddf7acdf6fa95124029595e3838dd9e7b0 | [
"MIT"
] | null | null | null | lgtv_rs232/commands/remote_control/remote_control_lock.py | davo22/lgtv_rs232 | 40562cddf7acdf6fa95124029595e3838dd9e7b0 | [
"MIT"
] | null | null | null | lgtv_rs232/commands/remote_control/remote_control_lock.py | davo22/lgtv_rs232 | 40562cddf7acdf6fa95124029595e3838dd9e7b0 | [
"MIT"
] | null | null | null | from enum import Enum
| 22.6 | 81 | 0.70649 |
0ac365363d4be305aa9c1fbf0e6475792a5ae142 | 253 | py | Python | com/bridgelabz/programs/powerof2.py | aashishogale/FunctionalPrograms-Python- | d297bdb78112ef03274a10a58efc90da27f51b14 | [
"MIT"
] | null | null | null | com/bridgelabz/programs/powerof2.py | aashishogale/FunctionalPrograms-Python- | d297bdb78112ef03274a10a58efc90da27f51b14 | [
"MIT"
] | null | null | null | com/bridgelabz/programs/powerof2.py | aashishogale/FunctionalPrograms-Python- | d297bdb78112ef03274a10a58efc90da27f51b14 | [
"MIT"
] | null | null | null | import sys
from com.bridgelabz.utility.Utility import Utility
PowerOf2().start() | 23 | 50 | 0.624506 |
0ac3dcb6f4a277998e57f0001095aaf45bef6fae | 2,256 | py | Python | app/main.py | MichaelLeeman/Job_Web_Scraper | 29205d84f1190830a77174ce8272f4f79bb3468b | [
"MIT"
] | null | null | null | app/main.py | MichaelLeeman/Job_Web_Scraper | 29205d84f1190830a77174ce8272f4f79bb3468b | [
"MIT"
] | 4 | 2020-05-25T19:54:58.000Z | 2020-05-25T19:55:03.000Z | app/main.py | MichaelLeeman/Job_Web_Scraper | 29205d84f1190830a77174ce8272f4f79bb3468b | [
"MIT"
] | 1 | 2020-07-02T13:06:52.000Z | 2020-07-02T13:06:52.000Z | # This program scraps data from job postings on the website workinstartups.com and appends it to an excel worksheet.
import os
from datetime import datetime, timedelta
from selenium import webdriver
from app import web_scraper
from app import excel
job_list, last_date = [], None
file_path = os.path.abspath("main.py").rstrip('/app/main.py') + '//Workbooks' + "//Job_Openings.xlsx"
print("-" * 75, "-" * 75, "\n\t\t\t\t\t\t\t JOB WEB SCRAPER", "-" * 75, "-" * 75, sep="\n")
print("\n")
# If the Job_Openings workbook already exists then append the jobs not already in the worksheet
# by checking the date of the first job in excel, since the last time the site was scraped.
if os.path.isfile(file_path):
print("Job_Opening excel file already exists. Loading workbook.", "-" * 75, sep="\n")
workbook, worksheet = excel.load_xlsx(file_path)
last_scrape_date = excel.get_first_job_date(worksheet)
last_scrape_date = datetime.strptime(last_scrape_date, "%d-%b-%Y")
# If not, create a new workbook and append all of the jobs posted within the month
else:
print("Creating new Excel workbook.", "-" * 75, sep="\n")
current_date = datetime.today()
date_month_ago = current_date - timedelta(weeks=4.348) # Average amount of weeks in a month
last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to midnight
workbook, worksheet = excel.init_xlsx(worksheet_title="Job Openings")
# Open webdriver to workinstartups.com and create soup
print("Creating soup and opening Chrome webdriver", "-"*75, sep="\n")
URL = "https://workinstartups.com/job-board/jobs-in/london"
soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0)
driver = webdriver.Chrome('./chromedriver')
driver.get(URL)
driver.find_element_by_link_text('Close').click()
# Scrap the jobs from workinstartups.com and update the worksheet with the found jobs
print("Scraping jobs from workinstartups.com. Please wait.", "-" * 75, sep="\n")
job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver)
print("Scraping finished. Updating and saving Excel workbook.", "-" * 75, sep="\n")
driver.close()
excel.update_xlsx(worksheet, job_list)
excel.save_xlsx(workbook, file_path)
print("Finished!", sep="\n")
| 47 | 116 | 0.735816 |
0ac3e100821a287c22e2857e9d532f5d8e059c8b | 2,723 | py | Python | src/trusted/validator_arm/dgen_output.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | 1 | 2021-12-23T00:36:43.000Z | 2021-12-23T00:36:43.000Z | src/trusted/validator_arm/dgen_output.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | null | null | null | src/trusted/validator_arm/dgen_output.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python2
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Some common boilerplates and helper functions for source code generation
in files dgen_test_output.py and dgen_decode_output.py.
"""
HEADER_BOILERPLATE ="""/*
* Copyright 2013 The Native Client Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can
* be found in the LICENSE file.
*/
// DO NOT EDIT: GENERATED CODE
"""
NOT_TCB_BOILERPLATE="""#ifndef NACL_TRUSTED_BUT_NOT_TCB
#error This file is not meant for use in the TCB
#endif
"""
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
"""Adds comment '// ' string after newlines."""
def ifdef_name(filename):
""" Generates the ifdef name to use for the given filename"""
return filename.replace("/", "_").replace(".", "_").upper() + "_"
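# For example, ifdef_name("native_client/src/trusted/validator_arm/gen/foo.h")
# yields "NATIVE_CLIENT_SRC_TRUSTED_VALIDATOR_ARM_GEN_FOO_H_".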
def GetNumberCodeBlocks(separators):
"""Gets the number of code blocks to break classes into."""
num_blocks = len(separators) + 1
assert num_blocks >= 2
return num_blocks
def FindBlockIndex(filename, format, num_blocks):
"""Returns true if the filename matches the format with an
index in the range [1, num_blocks]."""
for block in range(1, num_blocks+1):
suffix = format % block
if filename.endswith(suffix):
return block
raise Exception("Can't find block index: %s" % filename)
def GetDecodersBlock(n, separators, decoders, name_fcn):
"""Returns the (sorted) list of decoders to include
in block n, assuming decoders are split using
the list of separators."""
num_blocks = GetNumberCodeBlocks(separators)
assert n > 0 and n <= num_blocks
return [decoder for decoder in decoders
if ((n == 1
or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and
(n == num_blocks or
not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))]
def IsPrefixLeDecoder(prefix, decoder, name_fcn):
"""Returns true if the prefix is less than or equal to the
corresponding prefix length of the decoder name."""
decoder_name = name_fcn(decoder)
prefix_len = len(prefix)
decoder_len = len(decoder_name)
decoder_prefix = (decoder_name[0:prefix_len]
if prefix_len < decoder_len
else decoder_name)
return prefix <= decoder_prefix
| 31.298851 | 76 | 0.693353 |
0ac3e6f75c6ad2e83d2f026142ba224b4bab8c20 | 2,507 | py | Python | src/data_loader/input_data_loader.py | ChristopherBrix/Debona | f000f3d483b2cc592233d0ba2a1a0327210562c8 | [
"BSD-2-Clause"
] | 2 | 2020-07-26T09:48:22.000Z | 2021-09-30T01:51:13.000Z | src/data_loader/input_data_loader.py | ChristopherBrix/Debona | f000f3d483b2cc592233d0ba2a1a0327210562c8 | [
"BSD-2-Clause"
] | 2 | 2022-01-13T03:56:13.000Z | 2022-03-12T01:03:29.000Z | src/data_loader/input_data_loader.py | ChristopherBrix/Debona | f000f3d483b2cc592233d0ba2a1a0327210562c8 | [
"BSD-2-Clause"
] | null | null | null |
"""
Functions for loading input data.
Author: Patrick Henriksen <[email protected]>
"""
import os
import numpy as np
def load_img(path: str, img_nums: list, shape: tuple) -> np.array:
"""
    Loads an image in the human-readable format.
Args:
path:
            The path to the folder with mnist images.
img_nums:
A list with the numbers of the images we want to load.
shape:
The shape of a single image.
Returns:
The images as a MxCx28x28 numpy array.
"""
images = np.zeros((len(img_nums), *shape), dtype=float)
for idx, i in enumerate(img_nums):
file = os.path.join(path, "image" + str(i))
with open(file, "r") as f:
data = [float(pixel) for pixel in f.readlines()[0].split(",")[:-1]]
images[idx, :, :] = np.array(data).reshape(*shape)
return images
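# Example (hypothetical directory layout): load_img("data/mnist", [0, 1], (28, 28))
# reads data/mnist/image0 and data/mnist/image1 (comma-separated pixel values)
# and returns an array of shape (2, 28, 28).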
def load_mnist_human_readable(path: str, img_nums: list) -> np.array:
"""
Loads a mnist image from the neurify dataset.
Args:
path:
            The path to the folder with mnist images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx28x28 numpy array.
"""
return load_img(path, img_nums, (28, 28))
def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:
"""
Loads the Cifar10 images in human readable format.
Args:
path:
            The path to the folder with the Cifar10 images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx3x32x32 numpy array.
"""
return load_img(path, img_nums, (3, 32, 32))
def load_images_eran(img_csv: str = "../../resources/images/cifar10_test.csv", num_images: int = 100,
image_shape: tuple = (3, 32, 32)) -> tuple:
"""
Loads the images from the eran csv.
Args:
The csv path
Returns:
images, targets
"""
num_images = 100
images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32)
targets_array = np.zeros(num_images, dtype=int)
with open(img_csv, "r") as file:
for j in range(num_images):
line_arr = file.readline().split(",")
targets_array[j] = int(line_arr[0])
images_array[j] = [float(pixel) for pixel in line_arr[1:]]
return images_array.reshape((num_images, *image_shape)), targets_array
| 25.845361 | 101 | 0.603111 |
0ac42e49c824529d0aa71dbe888c2a691322545e | 2,527 | py | Python | ui_splash_screen.py | hirokiyaginuma/scriptspinner-software | 87185f237f76feeee33a2b74a4d05be088bde011 | [
"Unlicense"
] | null | null | null | ui_splash_screen.py | hirokiyaginuma/scriptspinner-software | 87185f237f76feeee33a2b74a4d05be088bde011 | [
"Unlicense"
] | null | null | null | ui_splash_screen.py | hirokiyaginuma/scriptspinner-software | 87185f237f76feeee33a2b74a4d05be088bde011 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'splash_screen.ui'
##
## Created by: Qt User Interface Compiler version 5.15.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
| 37.716418 | 140 | 0.646617 |
0ac44ba5690cb44ecf9e208ad61f69b8762610fd | 634 | py | Python | tools/leetcode.112.Path Sum/leetcode.112.Path Sum.submission10.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | 4 | 2015-10-10T00:30:55.000Z | 2020-07-27T19:45:54.000Z | tools/leetcode.112.Path Sum/leetcode.112.Path Sum.submission10.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | null | null | null | tools/leetcode.112.Path Sum/leetcode.112.Path Sum.submission10.py | tedye/leetcode | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @param {integer} sum
# @return {boolean}
def hasPathSum(self, root, sum):
if not root: return False
if not root.right and not root.left:
return sum == root.val
r = False
l = False
if root.right:
r = self.hasPathSum(root.right,sum-root.val)
if root.left:
l = self.hasPathSum(root.left,sum-root.val)
return r or l
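# Illustrative check (assumes the TreeNode definition sketched above): for the
# tree 5 -> (4, 8) the root-to-leaf sums are 9 and 13, so hasPathSum returns
# True for sum=9 and False for sum=14.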
| 634 | 634 | 0.545741 |
0ac4b5f3fcc2b83c0b6c655a23b542fa299d00d2 | 41,041 | py | Python | pandas/io/sql.py | danbirken/pandas | fa8a5ca1dd27c4169727070ddbdcb248002fddb4 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/io/sql.py | danbirken/pandas | fa8a5ca1dd27c4169727070ddbdcb248002fddb4 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/io/sql.py | danbirken/pandas | fa8a5ca1dd27c4169727070ddbdcb248002fddb4 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, timedelta
import warnings
import traceback
import itertools
import re
import numpy as np
import pandas.core.common as com
from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
#------------------------------------------------------------------------------
# Helper functions
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, coerce=True, unit=format)
elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, coerce=True, unit=format)
else:
return to_datetime(col, coerce=True, format=format)
def _parse_date_columns(data_frame, parse_dates):
""" Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
    cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
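# Example (illustrative sketch, not part of the original module): with a
# sqlite3 connection ``conn`` holding a table ``t``, one could run
#   rows = execute("SELECT * FROM t WHERE a = ?", conn, params=[1]).fetchall()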
#------------------------------------------------------------------------------
#--- Deprecated tquery and uquery
def tquery(sql, con=None, cur=None, retry=True):
"""
DEPRECATED. Returns list of tuples corresponding to each row in given sql
query.
If only one column selected, then plain list is returned.
To obtain the same result in the future, you can use the following:
>>> execute(sql, con, params).fetchall()
Parameters
----------
sql: string
SQL query to be executed
con: DBAPI2 connection
    cur: deprecated, cursor is obtained from connection
Returns
-------
Results Iterable
"""
    warnings.warn(
        "tquery is deprecated, and will be removed in future versions. "
        "You can use ``execute(...).fetchall()`` instead.",
        FutureWarning)
cur = execute(sql, con, cur=cur)
result = _safe_fetch(cur)
if con is not None:
try:
cur.close()
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName == 'OperationalError': # pragma: no cover
print('Failed to commit, may need to restart interpreter')
else:
raise
traceback.print_exc()
if retry:
return tquery(sql, con=con, retry=False)
if result and len(result[0]) == 1:
# python 3 compat
result = list(lzip(*result)[0])
elif result is None: # pragma: no cover
result = []
return result
def uquery(sql, con=None, cur=None, retry=True, params=None):
"""
DEPRECATED. Does the same thing as tquery, but instead of returning results, it
returns the number of rows affected. Good for update queries.
To obtain the same result in the future, you can use the following:
>>> execute(sql, con).rowcount
Parameters
----------
sql: string
SQL query to be executed
con: DBAPI2 connection
    cur: deprecated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
Returns
-------
Number of affected rows
"""
    warnings.warn(
        "uquery is deprecated, and will be removed in future versions. "
        "You can use ``execute(...).rowcount`` instead.",
        FutureWarning)
cur = execute(sql, con, cur=cur, params=params)
result = cur.rowcount
try:
con.commit()
except Exception as e:
excName = e.__class__.__name__
if excName != 'OperationalError':
raise
traceback.print_exc()
if retry:
print('Looks like your connection failed, reconnecting...')
return uquery(sql, con, retry=False)
return result
#------------------------------------------------------------------------------
#--- Read and write to DataFrames
def read_sql_table(table_name, con, index_col=None, coerce_float=True,
parse_dates=None, columns=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy engine, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy engine
        Sqlite DBAPI connection mode not supported
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table
Returns
-------
DataFrame
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
"""
pandas_sql = PandasSQLAlchemy(con)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string
SQL query to be executed
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_sql(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates)
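# Example (illustrative sketch; table and column names are assumptions):
#   df = read_sql_query("SELECT * FROM t", engine, index_col='id',
#                       parse_dates={'created': '%Y-%m-%d'})
# Here the 'created' column is parsed with the given strftime format.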
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed or database table name.
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string, optional
column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table (only used when reading
a table).
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query).
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, PandasSQLLegacy):
return pandas_sql.read_sql(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates)
if pandas_sql.has_table(sql):
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns)
else:
return pandas_sql.read_sql(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates)
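# Example (illustrative sketch): with a SQLAlchemy engine, ``read_sql`` treats
# the string as a table name if such a table exists and as a query otherwise,
# so both of the following are valid:
#   df = read_sql('my_table', engine, columns=['a', 'b'])
#   df = read_sql('SELECT a, b FROM my_table', engine)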
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True,
index_label=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label)
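# Example (illustrative sketch): append a frame to an existing table without
# writing the index as a column:
#   to_sql(df, 'my_table', engine, if_exists='append', index=False)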
def has_table(table_name, con, flavor='sqlite'):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy engine or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor: {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor)
return pandas_sql.has_table(table_name)
table_exists = has_table
_MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated "
"and will be removed in future versions. "
"MySQL will be further supported with SQLAlchemy engines.")
def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
try:
import sqlalchemy
if isinstance(con, sqlalchemy.engine.Engine):
return PandasSQLAlchemy(con, meta=meta)
else:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)
except ImportError:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return PandasSQLLegacy(con, flavor, is_cursor=is_cursor)
# ---- SQL without SQLAlchemy ---
# Flavour specific sql strings and handler class for access to DBs without
# SQLAlchemy installed
# SQL type conversions for each DB
_SQL_TYPES = {
'text': {
'mysql': 'VARCHAR (63)',
'sqlite': 'TEXT',
},
'float': {
'mysql': 'FLOAT',
'sqlite': 'REAL',
},
'int': {
'mysql': 'BIGINT',
'sqlite': 'INTEGER',
},
'datetime': {
'mysql': 'DATETIME',
'sqlite': 'TIMESTAMP',
},
'date': {
'mysql': 'DATE',
'sqlite': 'TIMESTAMP',
},
'bool': {
'mysql': 'BOOLEAN',
'sqlite': 'INTEGER',
}
}
# SQL enquote and wildcard symbols
_SQL_SYMB = {
'mysql': {
'br_l': '`',
'br_r': '`',
'wld': '%s'
},
'sqlite': {
'br_l': '[',
'br_r': ']',
'wld': '?'
}
}
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
def get_schema(frame, name, flavor='sqlite', keys=None, con=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
keys : string or sequence
        columns to use as primary key
con: an open SQL database connection object or an SQLAlchemy engine
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
"""
if con is None:
if flavor == 'mysql':
warnings.warn(_MYSQL_WARNING, FutureWarning)
return _get_schema_legacy(frame, name, flavor, keys)
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name)
def _get_schema_legacy(frame, name, flavor, keys=None):
"""Old function from 0.13.1. To keep backwards compatibility.
When mysql legacy support is dropped, it should be possible to
remove this code
"""
lookup_type = lambda dtype: get_sqltype(dtype, flavor)
column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes))
if flavor == 'sqlite':
columns = ',\n '.join('[%s] %s' % x for x in column_types)
else:
columns = ',\n '.join('`%s` %s' % x for x in column_types)
keystr = ''
if keys is not None:
if isinstance(keys, string_types):
keys = (keys,)
keystr = ', PRIMARY KEY (%s)' % ','.join(keys)
template = """CREATE TABLE %(name)s (
%(columns)s
%(keystr)s
);"""
create_statement = template % {'name': name, 'columns': columns,
'keystr': keystr}
return create_statement
# legacy names, with deprecation warnings and copied docs
def read_frame(*args, **kwargs):
    """DEPRECATED - use read_sql
"""
    warnings.warn("read_frame is deprecated, use read_sql", FutureWarning)
return read_sql(*args, **kwargs)
def frame_query(*args, **kwargs):
    """DEPRECATED - use read_sql
"""
    warnings.warn("frame_query is deprecated, use read_sql", FutureWarning)
return read_sql(*args, **kwargs)
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
    """DEPRECATED - use to_sql
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
con : DBAPI2 connection
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default False
Write DataFrame index as a column
Notes
-----
This function is deprecated in favor of ``to_sql``. There are however
two differences:
- With ``to_sql`` the index is written to the sql database by default. To
      keep the behaviour of this function you need to specify ``index=False``.
- The new ``to_sql`` function supports sqlalchemy engines to work with
different sql flavors.
See also
--------
pandas.DataFrame.to_sql
"""
    warnings.warn("write_frame is deprecated, use to_sql", FutureWarning)
# for backwards compatibility, set index=False when not specified
index = kwargs.pop('index', False)
return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists,
index=index, **kwargs)
# Append wrapped function docstrings
read_frame.__doc__ += read_sql.__doc__
frame_query.__doc__ += read_sql.__doc__
| 33.806425 | 103 | 0.608002 |
0ac61484010824f5bc86d5e3f43da1576d3d9bbb | 4,411 | py | Python | Systerm/meta.py | ZytroCode/Systerm | 688b1a9eab51ec2d2fcc8e921d57ae4ae585a1b7 | [
"MIT"
] | 1 | 2022-03-01T02:36:29.000Z | 2022-03-01T02:36:29.000Z | Systerm/meta.py | ZytroCode/Systerm | 688b1a9eab51ec2d2fcc8e921d57ae4ae585a1b7 | [
"MIT"
] | 1 | 2022-03-04T03:20:50.000Z | 2022-03-04T03:20:50.000Z | Systerm/meta.py | ZytroCode/Systerm | 688b1a9eab51ec2d2fcc8e921d57ae4ae585a1b7 | [
"MIT"
] | null | null | null | """Meta is a module that contains objects that customize the behavior of Python."""
from abc import ABC
from abc import ABCMeta
from abc import abstractmethod
from typing import Any
from typing import Callable
import Systerm
# Metaclass
# Object class
# List class
# Dictionary class
# Recreating ABC
ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in dir(ABC)})
def get_namespaces(object: Object) -> Dictionary:
"""Gets the namespaces of an object."""
return object.__namespaces__
def get_magics(object: Object) -> Dictionary:
"""Gets the magic methods of an object."""
return object.__magics__
def get_attributes(object: Object) -> Dictionary:
"""Gets the attributes of an object."""
return object.__attributes__
def get_publics(object: Object) -> Dictionary:
"""Gets the public namespaces of an object."""
return object.__publics__
def get_privates(object: Object) -> Dictionary:
"""Gets the private namespaces of an object."""
return object.__privates__
def get_protecteds(object: Object) -> Dictionary:
"""Gets the protected namespaces of an object."""
return object.__protecteds__
# Initializing Systerm.module
from Systerm._setup import init_module
module = init_module()
# MetaMod class
module.modules[__name__].__class__ = MetaMod
| 30.42069 | 102 | 0.608025 |
0ac6cf77a3b421f63bd83476f536c84c12d3066c | 11,859 | py | Python | samples/apps/txregulator/tests/txregulatorclient.py | iqsarv/CCF | 5cc33a1f0e06eb2a25dc1ebd0e2153881962b889 | [
"Apache-2.0"
] | 1 | 2020-02-03T21:57:22.000Z | 2020-02-03T21:57:22.000Z | samples/apps/txregulator/tests/txregulatorclient.py | kuychaco/CCF | e11acde3be6a7d2213fe5b406b959bb5bb64361d | [
"Apache-2.0"
] | null | null | null | samples/apps/txregulator/tests/txregulatorclient.py | kuychaco/CCF | e11acde3be6a7d2213fe5b406b959bb5bb64361d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.e2e_args
import infra.ccf
import infra.jsonrpc
import logging
from time import gmtime, strftime
import csv
import random
from loguru import logger as LOG
if __name__ == "__main__":
args = infra.e2e_args.cli_args(add)
args.package = args.app_script and "libluageneric" or "liblogging"
run(args)
| 39.795302 | 117 | 0.474239 |
0ac72633419a62f181f2995c29a463e6cede8eca | 4,925 | py | Python | src/finmag/sim/hysteresis.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
] | 10 | 2018-03-24T07:43:17.000Z | 2022-03-26T10:42:27.000Z | src/finmag/sim/hysteresis.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
] | 21 | 2018-03-26T15:08:53.000Z | 2021-07-10T16:11:14.000Z | src/finmag/sim/hysteresis.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
] | 7 | 2018-04-09T11:50:48.000Z | 2021-06-10T09:23:25.000Z | import os
import re
import glob
import logging
import textwrap
import fileinput
import numpy as np
from finmag.energies import Zeeman
from finmag.util.helpers import norm
log = logging.getLogger(name="finmag")
def hysteresis(sim, H_ext_list, fun=None, **kwargs):
"""
Set the applied field to the first value in `H_ext_list` (which should
be a list of external field vectors) and then call the relax() method.
When convergence is reached, the field is changed to the next one in
H_ext_list, and so on until all values in H_ext_list are exhausted.
Note: The fields in H_ext_list are applied *in addition to* any Zeeman
interactions that are already present in the simulation.
In particular, if only one external field should be present then
do not add any Zeeman interactions before calling this method.
If you would like to perform a certain action (e.g. save a VTK
snapshot of the magnetisation) at the end of each relaxation stage,
use the sim.schedule() command with the directive 'at_end=True' as
in the following example:
sim.schedule('save_vtk', at_end=True, ...)
sim.hysteresis(...)
*Arguments*
H_ext_list: list of 3-vectors
List of external fields, where each field can have any of
the forms accepted by Zeeman.__init__() (see its docstring
for more details).
fun: callable
The user can pass a function here (which should accept the
Simulation object as its only argument); this function is
called after each relaxation and determines the return
value (see below). For example, if
fun = (lambda sim: sim.m_average[0])
then the return value is a list of values representing the
average x-component of the magnetisation at the end of
each relaxation.
All other keyword arguments are passed on to the relax() method.
See its documentation for details.
*Return value*
If `fun` is not None then the return value is a list containing an
accumulation of all the return values of `fun` after each stage.
Otherwise the return value is None.
"""
if H_ext_list == []:
return
# Add a new Zeeman interaction, initialised to zero.
H = Zeeman((0, 0, 0))
sim.add(H)
# We keep track of the current stage of the hysteresis loop.
cur_stage = 0
num_stages = len(H_ext_list)
res = []
try:
while True:
H_cur = H_ext_list[cur_stage]
log.info(
"Entering hysteresis stage #{} ({} out of {}). Current field: "
"{}".format(cur_stage, cur_stage + 1, num_stages, H_cur))
H.set_value(H_cur)
sim.relax(**kwargs)
cur_stage += 1
if fun is not None:
retval = fun(sim)
res.append(retval)
log.debug("hysteresis callback function '{}' returned "
"value: {}".format(fun.__name__, retval))
except IndexError:
log.info("Hysteresis is finished.")
log.info("Removing the applied field used for hysteresis.")
sim.remove_interaction(H.name)
return res or None
def hysteresis_loop(sim, H_max, direction, N, **kwargs):
"""
Compute a hysteresis loop. This is a specialised convenience
version of the more general `hysteresis` method. It computes a
hysteresis loop where the external field is applied along a
single axis and changes magnitude from +H_max to -H_max and
back (using N steps in each direction).
The return value is a pair (H_vals, m_vals), where H_vals is
the list of field strengths at which a relaxation is performed
and m_vals is a list of scalar values containing, for each
field value, the averaged value of the magnetisation along the
axis `direction` (after relaxation has been reached). Thus the
command plot(H_vals, m_vals) could be used to plot the
hysteresis loop.
direction -- a vector indicating the direction of the
external field (will be normalised
automatically)
H_max -- maximum field strength
N -- number of data points to compute in each direction
(thus the total number of data points for the entire
loop will be 2*N-1)
kwargs -- any keyword argument accepted by the hysteresis() method
"""
d = np.array(direction)
H_dir = d / norm(d)
H_norms = list(np.linspace(H_max, -H_max, N)) + \
list(np.linspace(-H_max, H_max, N))
H_vals = [h * H_dir for h in H_norms]
m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs)
# projected lengths of the averaged magnetisation values along the axis
# `H_dir`
m_vals = [np.dot(m, H_dir) for m in m_avg]
return (H_norms, m_vals)
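# Example usage (illustrative sketch; the field magnitude is an assumption):
#   H_vals, m_vals = hysteresis_loop(sim, H_max=1e5, direction=[1, 0, 0], N=20)
#   # plot(H_vals, m_vals) then traces the loop along the x-axis.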
| 34.929078 | 79 | 0.650355 |
0ac87693a78b8ba6514e5ac5aa8d9530546bb44b | 39,691 | py | Python | uiSetup.py | smokedpirate/Encryption-hash-generator | 47bf3f1f6b6b24ca3e9078fefe46b1e6409d59e5 | [
"Apache-2.0"
] | 4 | 2020-09-24T16:34:03.000Z | 2020-10-23T09:52:59.000Z | uiSetup.py | Atharv-Khatri/Password-Encryption-Generator-Timathon-Submission- | 3a3db2fa9dc27c8f604d0eb0917e8ffa717f4786 | [
"Apache-2.0"
] | 1 | 2020-08-02T08:46:06.000Z | 2020-08-02T08:46:06.000Z | uiSetup.py | Atharv-Khatri/Password-Encryption-Generator-Timathon-Submission- | 3a3db2fa9dc27c8f604d0eb0917e8ffa717f4786 | [
"Apache-2.0"
] | 1 | 2020-08-02T08:33:46.000Z | 2020-08-02T08:33:46.000Z |
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5 import QtGui, QtCore
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 54.222678 | 105 | 0.659192 |
0ac88c66372990e2da39877dd262a4baa72b4bfd | 791 | py | Python | yxtx/myApp/migrations/0017_chat.py | wjh112233/yxtx | f118c2b9983ca48b099f2c328487e23f5430303f | [
"Apache-2.0"
] | null | null | null | yxtx/myApp/migrations/0017_chat.py | wjh112233/yxtx | f118c2b9983ca48b099f2c328487e23f5430303f | [
"Apache-2.0"
] | null | null | null | yxtx/myApp/migrations/0017_chat.py | wjh112233/yxtx | f118c2b9983ca48b099f2c328487e23f5430303f | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.2 on 2020-03-17 08:44
from django.db import migrations, models
| 30.423077 | 91 | 0.556258 |
0ac8bc92bddd721b23be9da9373cb90b73f83f01 | 1,200 | py | Python | core/controllers/services.py | willingc/oh-missions-oppia-beta | 3d97903a5155ec67f135b1aa2c02f3bb39eb02e7 | [
"Apache-2.0"
] | null | null | null | core/controllers/services.py | willingc/oh-missions-oppia-beta | 3d97903a5155ec67f135b1aa2c02f3bb39eb02e7 | [
"Apache-2.0"
] | 2 | 2021-06-10T23:58:39.000Z | 2021-12-13T20:51:34.000Z | core/controllers/services.py | willingc/oh-missions-oppia-beta | 3d97903a5155ec67f135b1aa2c02f3bb39eb02e7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for miscellaneous services."""
__author__ = 'Tarashish Mishra'
import base64
import json
from core.controllers import base
| 31.578947 | 77 | 0.726667 |
0ac98e5cdb6676a542021f48c116aa5fa733e705 | 16,208 | py | Python | convoy/crypto.py | hebinhuang/batch-shipyard | f87d94850380bee273eb51c5c35381952a5722b8 | [
"MIT"
] | null | null | null | convoy/crypto.py | hebinhuang/batch-shipyard | f87d94850380bee273eb51c5c35381952a5722b8 | [
"MIT"
] | null | null | null | convoy/crypto.py | hebinhuang/batch-shipyard | f87d94850380bee273eb51c5c35381952a5722b8 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# compat imports
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from builtins import ( # noqa
bytes, dict, int, list, object, range, str, ascii, chr, hex, input,
next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import base64
import collections
import getpass
import logging
import os
try:
import pathlib2 as pathlib
except ImportError:
import pathlib
import tempfile
import stat
import subprocess
# local imports
from . import settings
from . import util
# create logger
logger = logging.getLogger(__name__)
util.setup_logger(logger)
# global defines
_SSH_KEY_PREFIX = 'id_rsa_shipyard'
_REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX)
# named tuples
PfxSettings = collections.namedtuple(
'PfxSettings', ['filename', 'passphrase', 'sha1'])
def get_ssh_key_prefix():
# type: (None) -> str
"""Get SSH key prefix
:rtype: str
:return: ssh key prefix
"""
return _SSH_KEY_PREFIX
def get_remotefs_ssh_key_prefix():
# type: (None) -> str
"""Get remote fs SSH key prefix
:rtype: str
:return: ssh key prefix for remote fs
"""
return _REMOTEFS_SSH_KEY_PREFIX
def generate_rdp_password():
# type: (None) -> str
"""Generate an RDP password
:rtype: str
:return: rdp password
"""
return base64.b64encode(os.urandom(8))
def generate_ssh_keypair(export_path, prefix=None):
# type: (str, str) -> tuple
"""Generate an ssh keypair for use with user logins
:param str export_path: keypair export path
:param str prefix: key prefix
:rtype: tuple
:return: (private key filename, public key filename)
"""
if util.is_none_or_empty(prefix):
prefix = _SSH_KEY_PREFIX
privkey = pathlib.Path(export_path, prefix)
pubkey = pathlib.Path(export_path, prefix + '.pub')
if privkey.exists():
old = pathlib.Path(export_path, prefix + '.old')
if old.exists():
old.unlink()
privkey.rename(old)
if pubkey.exists():
old = pathlib.Path(export_path, prefix + '.pub.old')
if old.exists():
old.unlink()
pubkey.rename(old)
logger.info('generating ssh key pair to path: {}'.format(export_path))
subprocess.check_call(
        ['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', ''])
return (privkey, pubkey)
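# Helper assumed by check_ssh_private_key_filemode below; it is not present in
# this excerpt, so a minimal sketch is provided that simply tests whether any
# of the given permission bits are set on the file mode.
def _mode_check(fstat, mode):
    # type: (int, int) -> bool
    return bool(fstat & mode)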
def check_ssh_private_key_filemode(ssh_private_key):
# type: (pathlib.Path) -> bool
"""Check SSH private key filemode
:param pathlib.Path ssh_private_key: SSH private key
:rtype: bool
:return: private key filemode is ok
"""
if util.on_windows():
return True
fstat = ssh_private_key.stat().st_mode
modes = frozenset((stat.S_IRWXG, stat.S_IRWXO))
return not any([_mode_check(fstat, x) for x in modes])
def connect_or_exec_ssh_command(
remote_ip, remote_port, ssh_private_key, username, sync=True,
shell=False, tty=False, ssh_args=None, command=None):
# type: (str, int, pathlib.Path, str, bool, bool, tuple, tuple) -> bool
"""Connect to node via SSH or execute SSH command
:param str remote_ip: remote ip address
:param int remote_port: remote port
:param pathlib.Path ssh_private_key: SSH private key
:param str username: username
:param bool sync: synchronous execution
:param bool shell: execute with shell
:param bool tty: allocate pseudo-tty
:param tuple ssh_args: ssh args
:param tuple command: command
:rtype: int or subprocess.Process
:return: return code or subprocess handle
"""
if not ssh_private_key.exists():
raise RuntimeError('SSH private key file not found at: {}'.format(
ssh_private_key))
# ensure file mode is set properly for the private key
if not check_ssh_private_key_filemode(ssh_private_key):
logger.warning(
'SSH private key filemode is too permissive: {}'.format(
ssh_private_key))
# execute SSH command
ssh_cmd = [
'ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_private_key), '-p', str(remote_port),
]
if tty:
ssh_cmd.append('-t')
if util.is_not_empty(ssh_args):
ssh_cmd.extend(ssh_args)
ssh_cmd.append('{}@{}'.format(username, remote_ip))
if util.is_not_empty(command):
ssh_cmd.extend(command)
logger.info('{} node {}:{} with key {}'.format(
'connecting to' if util.is_none_or_empty(command)
else 'executing command on', remote_ip, remote_port, ssh_private_key))
if sync:
return util.subprocess_with_output(ssh_cmd, shell=shell)
else:
return util.subprocess_nowait_pipe_stdout(
ssh_cmd, shell=shell, pipe_stderr=True)
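# Example (illustrative sketch; host, port and key path are assumptions):
#   rc = connect_or_exec_ssh_command(
#       '10.0.0.4', 22, pathlib.Path('id_rsa_shipyard'), 'shipyard',
#       command=('uptime',))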
def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
# type: (str, str, str) -> str
"""Derive a private key pem file from a pfx
:param str pfxfile: pfx file
:param str passphrase: passphrase for pfx
:param str pemfile: path of pem file to write to
:rtype: str
:return: path of pem file
"""
if pfxfile is None:
raise ValueError('pfx file is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# convert pfx to pem
if pemfile is None:
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
pemfile = f.name
try:
# create pem from pfx
subprocess.check_call(
['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
pemfile, '-password', 'pass:' + passphrase]
)
except Exception:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
pemfile = None
return pemfile
def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
# type: (str, str, str) -> str
"""Derive a public key pem file from a pfx
:param str pfxfile: pfx file
:param str passphrase: passphrase for pfx
:param str pemfile: path of pem file to write to
:rtype: str
:return: path of pem file
"""
if pfxfile is None:
raise ValueError('pfx file is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# convert pfx to pem
if pemfile is None:
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
pemfile = f.name
try:
# create pem from pfx
subprocess.check_call(
['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
pemfile, '-password', 'pass:' + passphrase]
)
# extract public key from private key
subprocess.check_call(
['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform',
'PEM', '-out', pemfile]
)
except Exception:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
pemfile = None
return pemfile
def _parse_sha1_thumbprint_openssl(output):
# type: (str) -> str
"""Get SHA1 thumbprint from buffer
:param str buffer: buffer to parse
:rtype: str
:return: sha1 thumbprint of buffer
"""
# return just thumbprint (without colons) from the above openssl command
# in lowercase. Expected openssl output is in the form:
# SHA1 Fingerprint=<thumbprint>
return ''.join(util.decode_string(
output).strip().split('=')[1].split(':')).lower()
def get_sha1_thumbprint_pfx(pfxfile, passphrase):
# type: (str, str) -> str
"""Get SHA1 thumbprint of PFX
:param str pfxfile: name of the pfx file to export
:param str passphrase: passphrase for pfx
:rtype: str
:return: sha1 thumbprint of pfx
"""
if pfxfile is None:
raise ValueError('pfxfile is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# compute sha1 thumbprint of pfx
pfxdump = subprocess.check_output(
['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin',
'pass:' + passphrase]
)
proc = subprocess.Popen(
['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0])
def get_sha1_thumbprint_pem(pemfile):
# type: (str) -> str
"""Get SHA1 thumbprint of PEM
:param str pfxfile: name of the pfx file to export
:rtype: str
:return: sha1 thumbprint of pem
"""
proc = subprocess.Popen(
['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile],
stdout=subprocess.PIPE
)
return _parse_sha1_thumbprint_openssl(proc.communicate()[0])
def generate_pem_pfx_certificates(config):
# type: (dict) -> str
"""Generate a pem and a derived pfx file
:param dict config: configuration dict
:rtype: str
:return: sha1 thumbprint of pfx
"""
# gather input
pemfile = settings.batch_shipyard_encryption_public_key_pem(config)
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
if pemfile is None:
pemfile = util.get_input('Enter public key PEM filename to create: ')
if pfxfile is None:
pfxfile = util.get_input('Enter PFX filename to create: ')
if passphrase is None:
while util.is_none_or_empty(passphrase):
passphrase = getpass.getpass('Enter password for PFX: ')
if len(passphrase) == 0:
print('passphrase cannot be empty')
privatekey = pemfile + '.key'
# generate pem file with private key and no password
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
try:
subprocess.check_call(
['openssl', 'req', '-new', '-nodes', '-x509', '-newkey',
'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days', '730',
'-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard']
)
# extract public key from private key
subprocess.check_call(
['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform',
'PEM', '-out', pemfile]
)
logger.debug('created public key PEM file: {}'.format(pemfile))
# convert pem to pfx for Azure Batch service
subprocess.check_call(
['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey',
privatekey, '-in', f.name, '-certfile', f.name,
'-passin', 'pass:', '-passout', 'pass:' + passphrase]
)
logger.debug('created PFX file: {}'.format(pfxfile))
finally:
# remove rsa private key file
fp = pathlib.Path(privatekey)
if fp.exists():
fp.unlink()
# remove temp cert pem
fp = pathlib.Path(f.name)
if fp.exists():
fp.unlink()
# get sha1 thumbprint of pfx
return get_sha1_thumbprint_pfx(pfxfile, passphrase)
def get_encryption_pfx_settings(config):
# type: (dict) -> tuple
"""Get PFX encryption settings from configuration
:param dict config: configuration settings
:rtype: tuple
:return: pfxfile, passphrase, sha1 tp
"""
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint(
config)
# manually get thumbprint of pfx if not exists in config
if util.is_none_or_empty(sha1_cert_tp):
if pfx_passphrase is None:
pfx_passphrase = getpass.getpass('Enter password for PFX: ')
sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase)
settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint(
config, sha1_cert_tp)
return PfxSettings(
filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp)
def _rsa_encrypt_string(data, config):
# type: (str, dict) -> str
"""RSA encrypt a string
:param str data: clear text data to encrypt
:param dict config: configuration dict
:rtype: str
:return: base64-encoded cipher text
"""
if util.is_none_or_empty(data):
raise ValueError('invalid data to encrypt')
inkey = settings.batch_shipyard_encryption_public_key_pem(config)
derived = False
if inkey is None:
# derive pem from pfx
derived = True
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(
config)
inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None)
try:
if inkey is None:
raise RuntimeError('public encryption key is invalid')
proc = subprocess.Popen(
['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ciphertext = util.base64_encode_string(
proc.communicate(input=util.encode_string(data))[0])
if proc.returncode != 0:
raise RuntimeError(
'openssl encryption failed with returncode: {}'.format(
proc.returncode))
return ciphertext
finally:
if derived:
fp = pathlib.Path(inkey)
if fp.exists():
fp.unlink()
def _rsa_decrypt_string_with_pfx(ciphertext, config):
# type: (str, dict) -> str
"""RSA decrypt a string
:param str ciphertext: cipher text in base64
:param dict config: configuration dict
:rtype: str
:return: decrypted cipher text
"""
if util.is_none_or_empty(ciphertext):
raise ValueError('invalid ciphertext to decrypt')
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None)
if pemfile is None:
raise RuntimeError('cannot decrypt without valid private key')
cleartext = None
try:
data = util.base64_decode_string(ciphertext)
proc = subprocess.Popen(
['openssl', 'rsautl', '-decrypt', '-inkey', pemfile],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cleartext = proc.communicate(input=data)[0]
finally:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
return cleartext
def encrypt_string(enabled, string, config):
# type: (bool, str, dict) -> str
"""Encrypt a string
:param bool enabled: if encryption is enabled
:param str string: string to encrypt
:param dict config: configuration dict
:rtype: str
:return: encrypted string if enabled
"""
if enabled:
return _rsa_encrypt_string(string, config)
else:
return string
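# Example (illustrative sketch): with encryption enabled in the configuration,
# a credential can be protected before being embedded elsewhere:
#   ciphertext = encrypt_string(True, 'my-secret', config)
# and later recovered where the PFX private key is available via
# _rsa_decrypt_string_with_pfx(ciphertext, config).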
| 35.311547 | 79 | 0.653258 |
0ac9b8651f0cd02d3cb27eefe5c6577d55fc334a | 4,080 | py | Python | libs/configs/COCO/cfgs_res50_1x_coco_v3.py | lj-ecjtu/Cascade_FPN_Tensorflow-master | 40fcd2c10f057b3f015ca1380d7db102e967391f | [
"MIT"
] | 43 | 2019-04-25T08:07:49.000Z | 2021-08-24T08:33:37.000Z | libs/configs/COCO/cfgs_res50_1x_coco_v3.py | lj-ecjtu/Cascade_FPN_Tensorflow-master | 40fcd2c10f057b3f015ca1380d7db102e967391f | [
"MIT"
] | 16 | 2019-05-11T03:51:19.000Z | 2021-10-09T08:26:18.000Z | libs/configs/COCO/cfgs_res50_1x_coco_v3.py | lj-ecjtu/Cascade_FPN_Tensorflow-master | 40fcd2c10f057b3f015ca1380d7db102e967391f | [
"MIT"
] | 15 | 2019-04-29T03:26:35.000Z | 2020-05-26T05:35:39.000Z | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
'''
gluoncv backbone + multi_gpu
'''
# ------------------------------------------------
VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3'
NET_NAME = 'resnet50_v1d'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2,3,4,5,6,7"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 80000
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'
if NET_NAME.startswith("resnet"):
weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
raise NotImplementedError
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = False
FIXED_BLOCKS = 0 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
CUDA9 = True
EVAL_THRESHOLD = 0.5
RPN_LOCATION_LOSS_WEIGHT = 1.
RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0
RPN_SIGMA = 3.0
FASTRCNN_SIGMA = 1.0
MUTILPY_BIAS_GRADIENT = None # 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = None # 10.0 if None, will not clip
EPSILON = 1e-5
MOMENTUM = 0.9
BATCH_SIZE = 1
WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE)
LR = 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE
DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000
MAX_ITERATION = 20*SAVE_WEIGHTS_INTE
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'coco' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1333
CLASS_NUM = 80
# --------------------------------------------- Network_config
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001
IS_ASSIGN = True
# ---------------------------------------------Anchor config
USE_CENTER_OFFSET = True
LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]
ROI_SCALE_FACTORS = [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]]
ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0]
# --------------------------------------------FPN config
SHARE_HEADS = True
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 6000
RPN_MAXIMUM_PROPOSAL_TEST = 1000
# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.6 # only show in tensorboard
FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0 # 0.1 < IOU < 0.5 is negative
FAST_RCNN_MINIBATCH_SIZE = 512 # if is -1, that is train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.25
ADD_GTBOXES_TO_TRAIN = False
| 31.384615 | 100 | 0.684069 |
0acab913205c6b28e2e38031c30bbb139185f389 | 3,055 | py | Python | python/delta/tests/test_exceptions.py | vibhaska/delta | 0e16356ff46520404e2376d048f002ca74f6dc0c | [
"Apache-2.0"
] | 1 | 2022-01-18T10:52:49.000Z | 2022-01-18T10:52:49.000Z | python/delta/tests/test_exceptions.py | vibhaska/delta | 0e16356ff46520404e2376d048f002ca74f6dc0c | [
"Apache-2.0"
] | null | null | null | python/delta/tests/test_exceptions.py | vibhaska/delta | 0e16356ff46520404e2376d048f002ca74f6dc0c | [
"Apache-2.0"
] | 1 | 2022-03-06T09:29:55.000Z | 2022-03-06T09:29:55.000Z | #
# Copyright (2020) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import delta.exceptions as exceptions
from delta.testing.utils import DeltaTestCase
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=4)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=4)
| 41.849315 | 87 | 0.734206 |
0acabac25e7f182a0cc9d197e74fb9a54f708fdd | 629 | py | Python | day10/samematrix.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null | day10/samematrix.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null | day10/samematrix.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null |
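# The helpers below are not part of this excerpt; minimal sketches of what the
# script appears to assume: matrix_form() reads a matrix from user input and
# check_matrix() reports whether the two matrices are identical.
def matrix_form():
    rows = int(input("Enter number of rows: "))
    cols = int(input("Enter number of columns: "))
    return [[int(input("Enter element: ")) for _ in range(cols)] for _ in range(rows)]
def check_matrix(first, second):
    print("Matrices are identical" if first == second else "Matrices are not identical")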
print("Enter the 1st matrix")
first_matrix = matrix_form()
print(first_matrix)
print("Enter the 2nd matrix")
sec_matrix = matrix_form()
print(sec_matrix)
check_matrix(first_matrix,sec_matrix) | 22.464286 | 45 | 0.63434 |
0acb3e8369864a2998734321cae251dc26fd05fa | 2,884 | py | Python | extractFeatures.py | PatrickJReed/Longboard | f6ca4a6e51c91296894aee2e02b86f83b38c080a | [
"MIT"
] | 1 | 2020-04-27T19:55:29.000Z | 2020-04-27T19:55:29.000Z | extractFeatures.py | PatrickJReed/Longboard2 | f6ca4a6e51c91296894aee2e02b86f83b38c080a | [
"MIT"
] | 1 | 2020-02-26T18:06:09.000Z | 2020-02-26T18:06:09.000Z | extractFeatures.py | PatrickJReed/Longboard | f6ca4a6e51c91296894aee2e02b86f83b38c080a | [
"MIT"
] | null | null | null | #!/home/ubuntu/miniconda2/bin/python
from __future__ import division
import sys
import glob, os, gc
import uuid
import os.path
import csv
import numpy as np
from time import time
from subprocess import (call, Popen, PIPE)
from itertools import product
import shutil
import re
import pickle
from boto3.session import Session
import boto3
import h5py
import umap
import hdbscan
from keras.models import load_model
from keras.models import Model
from keras import backend as K
from keras.utils import multi_gpu_model
##Path to Data
basepath = "/home/ubuntu/"
subject = sys.argv[1]
with open("config.txt") as f:
config = [line.rstrip() for line in f]
print config[0]
print config[1]
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3 = boto3.client ('s3')
s3.download_file('for-ndar',os.path.join("metadata/", subject + ".txt"),os.path.join(basepath,subject + ".txt"))
with open(subject + ".txt") as f:
Cells = [line.rstrip() for line in f]
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.download_file('bsmn-data',os.path.join('Inception_Transfer_Model.h5'),os.path.join(basepath,'Inception_Transfer_Model.h5'))
feat_extractor = load_model(os.path.join(basepath,'Inception_Transfer_Model.h5'))
parallel_model = multi_gpu_model(feat_extractor, gpus=2)
count = 0
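# Per-cell processing loop (descriptive comments added for clarity): for each
# cell, fetch its HDF5 patch data from S3, run the Inception-based feature
# extractor over the patches (X), and accumulate the labels (Y), extracted
# features (Z) and originating cell ids (U) for the whole subject.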
for cell in Cells:
print(cell)
cell_size=0
cell_ids = []
s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_IDs.h5'),os.path.join(basepath,cell+'_IDs.h5'))
f = h5py.File(os.path.join(basepath,cell+'_IDs.h5'), 'r')
cell_ids = f['ID']
for cid in cell_ids:
cid = cid.decode('utf-8')
s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_'+cid+'.h5'),os.path.join(basepath,cell+'_'+cid+'.h5'))
xyz = h5py.File(os.path.join(basepath,cell+'_'+cid+'.h5'), 'r')
os.remove(os.path.join(basepath,cell+'_'+cid+'.h5'))
if count == 0:
X = xyz['X']
Y = xyz['Y']
Z = parallel_model.predict(X, batch_size = 128)
count+=1
length = len(Y)
U = [cid] * length
else:
X = xyz['X']
Y = np.append(Y,xyz['Y'], axis=0)
z = feat_extractor.predict(X, batch_size = 128)
Z = np.append(Z,z, axis=0)
length = len(xyz['Y'])
U = U + ([cid] * length)
print(Z.shape)
hf = h5py.File(subject+'_ef.h5', 'w')
hf.create_dataset('Y', data=Y)
hf.create_dataset('Z', data=Z)
hf.close()
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.upload_file(os.path.join(subject+'_ef.h5'),'bsmn-data',os.path.join(subject, subject+'_ef.h5'))
call(['sudo', 'shutdown', '-h', 'now']) | 31.692308 | 138 | 0.662968 |
0accac5244ae00b90c3dcaa313e0ad6674cf5f7f | 5,284 | py | Python | kepler.py | mdbernard/astrodynamics | cf98df6cd17086e3675c1f7c2fce342d5322ee51 | [
"MIT"
] | null | null | null | kepler.py | mdbernard/astrodynamics | cf98df6cd17086e3675c1f7c2fce342d5322ee51 | [
"MIT"
] | 14 | 2020-11-10T02:37:15.000Z | 2022-02-07T01:11:29.000Z | kepler.py | mdbernard/astrodynamics | cf98df6cd17086e3675c1f7c2fce342d5322ee51 | [
"MIT"
] | null | null | null | import numpy as np
from stumpff import C, S
from CelestialBody import BODIES
from numerical import newton, laguerre
from lagrange import calc_f, calc_fd, calc_g, calc_gd
def kepler_chi(chi, alpha, r0, vr0, mu, dt):
''' Kepler's Equation of the universal anomaly, modified
for use in numerical solvers. '''
z = alpha*chi**2
return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \
(1 - alpha*r0)*chi**3*S(z) + \
r0*chi - np.sqrt(mu)*dt
def dkepler_dchi(chi, alpha, r0, vr0, mu, dt):
''' Derivative of Kepler's Equation of the universal anomaly,
modified for use in numerical solvers. '''
z = alpha*chi**2
return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \
(1 - alpha*r0)*chi**2*C(z) + r0
def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt):
''' Second derivative of Kepler's Equation of the universal
anomaly, modified for use in numerical solvers. '''
z = alpha*chi**2
S_ = S(z)
return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \
chi*(1 - z*S_)*(1 - alpha*r0)
def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100):
''' Solve Kepler's Equation of the universal anomaly chi using the specified
numerical method. Applies Algorithm 3.4 from Orbital Mechanics for Engineering
Students, 4 ed, Curtis.
:param r_0: `iterable` (km) initial position 3-vector
:param v_0: `iterable` (km/s) initial velocity 3-vector
:param dt: `float` (s) time after initial state to solve for r, v as 3-vectors
:param body: `CelestialBody` (--) the celestial body to use for orbital parameters
:param method: `str` (--) which numerical method to use to solve Kepler's Equation
    :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is IEEE 754 single precision)
:param max_iters: `int` (--) maximum number of iterations in numerical method before breaking
:return: (km) final position 3-vector, (km/s) final velocity 3-vector
'''
VALID_METHODS = ('laguerre', 'newton')
mu = body.mu # (km**3/s**2) gravitational parameter of the specified primary body
r0 = np.linalg.norm(r_0) # (km) initial position magnitude
v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude
vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude
alpha = 2/r0 - v0**2/mu # (1/km) inverse of semi-major axis
chi0 = np.sqrt(mu)*np.abs(alpha)*dt
if method not in VALID_METHODS:
print(f'Method \'{method}\' is not valid, must be one of {VALID_METHODS}.\nDefaulting to laguerre method.')
chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
elif method == 'newton':
chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt)
else: # method == 'laguerre'
chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
f = calc_f(chi, r0, alpha)
g = calc_g(dt, mu, chi, alpha)
r_1 = f*r_0 + g*v_0
r1 = np.linalg.norm(r_1)
fd = calc_fd(mu, r1, r0, alpha, chi)
gd = calc_gd(chi, r1, alpha)
v_1 = fd*r_0 + gd*v_0
return r_1, v_1
def solve_kepler_E(e, Me, tol=1e-7, max_iters=100):
''' Solve Kepler's Equation in the form containing Eccentric Anomaly (E),
eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1 from Orbital
Mechanics for Engineering Students, 4 ed, Curtis. '''
    # TODO: have this function make use of one of the numerical methods in numerical.py
    # Residual and derivative of Kepler's Equation for the Newton iteration below
    # (standard forms; these helpers are reconstructed since they are not shown above).
    def f(E, e, Me):
        return E - e*np.sin(E) - Me
    def fp(E, e):
        return 1 - e*np.cos(E)
    E = Me + e/2 if Me < np.pi else Me - e/2
    ratio = f(E, e, Me)/fp(E, e)
iters = 0
while abs(ratio) > tol and iters < max_iters:
E -= ratio
ratio = f(E, e, Me)/fp(E, e)
iters += 1
E -= ratio
converged = np.abs(ratio) <= tol
return E, iters, converged
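# Worked example (illustrative, not part of the original module): for e = 0.1 and
# Me = 1.0 rad, solve_kepler_E(0.1, 1.0) starts from E0 = Me + e/2 = 1.05 and the
# Newton iteration converges in a few steps to roughly E ~ 1.0886 rad, at which
# point E - e*sin(E) = Me holds to within tol.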
def test():
''' Test the functionality of solve_kepler_chi
and solve_kepler_laguerre using Problem 3.20 from
Orbital Mechanics for Engineering Students, 4 ed, Curtis.
'''
# given starting information
Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and all the Earth things
r_0 = np.array([20000, -105000, -19000]) # (km) initial position vector
v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector
dt = 2*60*60 # (s) time of interest after initial time
# given correct answer from textbook
correct_r_1 = np.array([26338, -128750, -29656]) # (km) final position vector
correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector
# solve using above methods
r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton')
r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre')
# check correctness
# tolerance based on significant figures of given answers
newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4)
laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4)
return all([newton_valid, laguerre_valid])
if __name__ == '__main__':
print(test())
| 39.140741 | 115 | 0.645912 |
0acd26a6aeb9fbb21484a68cd667f26b74d856f7 | 952 | py | Python | nicos_demo/vpgaa/setups/pgai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_demo/vpgaa/setups/pgai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_demo/vpgaa/setups/pgai.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'PGAA setup with XYZOmega sample table'
group = 'basic'
sysconfig = dict(
datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink']
)
includes = [
'system',
'reactor',
'nl4b',
'pressure',
'sampletable',
'pilz',
'detector',
'collimation',
]
devices = dict(
mcasink = device('nicos_mlz.pgaa.devices.MCASink',
settypes = {'point'},
detectors = ['_60p', 'LEGe'],
),
chnsink = device('nicos_mlz.pgaa.devices.CHNSink',
settypes = {'point'},
detectors = ['_60p', 'LEGe'],
),
csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink',
settypes = {'point'},
),
)
startupcode = """
SetDetectors('_60p', 'LEGe')
SetEnvironment(chamber_pressure)
printinfo("============================================================")
printinfo("Welcome to the NICOS PGAI demo setup.")
printinfo("============================================================")
"""
| 23.219512 | 73 | 0.522059 |
0acd83639363e1e8109b480a9d0f9a0898831b8f | 54,720 | py | Python | tests/python/relay/test_op_level2.py | ravikumarvc/incubator-tvm | 9826947ffce0ed40e9d47a0db2abb033e394279e | [
"Apache-2.0"
] | 3 | 2021-02-23T22:06:01.000Z | 2021-09-30T09:59:17.000Z | tests/python/relay/test_op_level2.py | ravikumarvc/incubator-tvm | 9826947ffce0ed40e9d47a0db2abb033e394279e | [
"Apache-2.0"
] | 4 | 2021-03-30T11:59:59.000Z | 2022-03-12T00:40:23.000Z | tests/python/relay/test_op_level2.py | ravikumarvc/incubator-tvm | 9826947ffce0ed40e9d47a0db2abb033e394279e | [
"Apache-2.0"
] | 3 | 2021-07-20T07:40:15.000Z | 2021-08-03T08:39:17.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 operator test cases.
"""
import numpy as np
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
from tvm.contrib import util
import topi.testing
if __name__ == "__main__":
test_pool1d()
test_pool2d()
test_pool3d()
test_avg_pool2d_no_count_pad()
test_lrn()
test_l2_normalize()
test_conv1d_infer_type()
test_conv2d_infer_type()
test_conv3d_infer_type()
test_bitpack_infer_type()
test_upsampling_infer_type()
test_upsampling3d_infer_type()
test_flatten_infer_type()
test_pad_infer_type()
test_pad_run()
test_conv2d_transpose_infer_type()
test_conv2d_transpose_nchw_run()
test_conv2d_transpose_nhwc_run()
test_conv1d_transpose_ncw_run()
test_conv1d_run()
test_conv2d_run()
test_conv2d_winograd()
test_conv3d_run()
test_conv3d_ndhwc_run()
test_bitserial_conv2d_infer_type()
test_batch_flatten()
test_upsampling()
test_upsampling3d()
test_conv2d_int8_intrinsics()
test_depthwise_conv2d_int8()
| 43.052714 | 101 | 0.564126 |
0ace54f568ea92472966bb73d6fa4f6d624bebbf | 6,859 | py | Python | official/nlp/transformer/utils/tokenizer_test.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | [
"Apache-2.0"
] | 1 | 2021-05-22T12:50:50.000Z | 2021-05-22T12:50:50.000Z | official/nlp/transformer/utils/tokenizer_test.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | [
"MIT"
] | null | null | null | official/nlp/transformer/utils/tokenizer_test.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | [
"MIT"
] | null | null | null | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Subtokenizer and string helper methods."""
import collections
import tempfile
import tensorflow as tf
from official.nlp.transformer.utils import tokenizer
if __name__ == "__main__":
tf.test.main()
| 33.458537 | 81 | 0.626185 |
0ace832c1c225820e768849e83537e4e6af2dc78 | 76 | py | Python | api/api/form7_searching_utils/__init__.py | bcgov/court-of-appeal | ef773b1baa80d3aff1ac807ed01f59266d885955 | [
"Apache-2.0"
] | null | null | null | api/api/form7_searching_utils/__init__.py | bcgov/court-of-appeal | ef773b1baa80d3aff1ac807ed01f59266d885955 | [
"Apache-2.0"
] | 35 | 2021-02-02T19:30:11.000Z | 2022-03-29T12:40:42.000Z | api/api/form7_searching_utils/__init__.py | marzmehr/court-of-appeal | ef773b1baa80d3aff1ac807ed01f59266d885955 | [
"Apache-2.0"
] | 2 | 2021-02-03T17:26:23.000Z | 2021-02-05T13:35:26.000Z | from .form7_search import Form7Search
from .parse_form7 import Form7Parsing
| 25.333333 | 37 | 0.868421 |
0acf1290742f590cb6015abc57d74458d907cabb | 1,164 | py | Python | soil/build/lib/soil/openstack/snapshot.py | JackDan9/soil | ae612a4634634aace834491fbdefbc69e6167674 | [
"MIT"
] | 1 | 2020-08-06T11:58:35.000Z | 2020-08-06T11:58:35.000Z | soil/build/lib/soil/openstack/snapshot.py | JackDan9/soil | ae612a4634634aace834491fbdefbc69e6167674 | [
"MIT"
] | 4 | 2019-12-13T11:27:28.000Z | 2022-02-27T11:58:38.000Z | soil/build/lib/soil/openstack/snapshot.py | JackDan9/soil | ae612a4634634aace834491fbdefbc69e6167674 | [
"MIT"
] | null | null | null | # Copyright 2020 Soil, Inc.
from soil.openstack.base import DataBase
from soil.openstack.base import SourceBase
| 25.304348 | 63 | 0.636598 |
0acf3366802d8714bb15485c54ab7f3de9aac778 | 2,776 | py | Python | Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1dcf54522e9d20711ff1114550dc2893ed3e9ed0 | [
"MIT"
] | 1,290 | 2020-05-28T21:24:43.000Z | 2022-03-31T16:38:43.000Z | Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1dcf54522e9d20711ff1114550dc2893ed3e9ed0 | [
"MIT"
] | 1 | 2020-07-03T21:14:52.000Z | 2020-07-03T21:14:52.000Z | Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1dcf54522e9d20711ff1114550dc2893ed3e9ed0 | [
"MIT"
] | 280 | 2020-05-29T17:28:38.000Z | 2022-03-31T13:54:15.000Z | # -*- coding: utf-8 -*-
# Thanks to @skelsec for his awesome tool Pypykatz
# Checks his project here: https://github.com/skelsec/pypykatz
import codecs
import traceback
from lazagne.config.module_info import ModuleInfo
from lazagne.config.constant import constant
from pypykatz.pypykatz import pypykatz
| 36.526316 | 106 | 0.501801 |
0acf54e8a20fd816eda3589c3b616626bb4f33fb | 14,981 | py | Python | test/test_discogs.py | mglukhovsky/beets | 889e30c056a609cf71c8c8200259520230545222 | [
"MIT"
] | null | null | null | test/test_discogs.py | mglukhovsky/beets | 889e30c056a609cf71c8c8200259520230545222 | [
"MIT"
] | null | null | null | test/test_discogs.py | mglukhovsky/beets | 889e30c056a609cf71c8c8200259520230545222 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for discogs plugin.
"""
from __future__ import division, absolute_import, print_function
import unittest
from test import _common
from test._common import Bag
from test.helper import capture_log
from beetsplug.discogs import DiscogsPlugin
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 41.269972 | 79 | 0.59235 |
0acf5c8efa495629dab15411d7c1138e6f73ca8f | 1,417 | py | Python | data_structures/queue/queue_on_pseudo_stack.py | hank-chou/python | a9f729fa263bce599d2774f3f6afb5a18bcc9862 | [
"MIT"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | data_structures/queue/queue_on_pseudo_stack.py | hank-chou/python | a9f729fa263bce599d2774f3f6afb5a18bcc9862 | [
"MIT"
] | 162 | 2021-03-09T01:52:11.000Z | 2022-03-12T01:09:07.000Z | data_structures/queue/queue_on_pseudo_stack.py | hank-chou/python | a9f729fa263bce599d2774f3f6afb5a18bcc9862 | [
"MIT"
] | 18 | 2020-02-09T13:00:11.000Z | 2021-03-11T08:47:36.000Z | """Queue represented by a pseudo stack (represented by a list with pop and append)"""
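# Illustrative sketch only (the original class body is not included above, and this
# hypothetical PseudoStackQueue is not part of the file): a FIFO queue can be faked
# with a single Python list by appending on put and popping the front on get.
class PseudoStackQueue:
    def __init__(self):
        self.stack = []
    def put(self, item) -> None:
        self.stack.append(item)      # enqueue at the back
    def get(self):
        if not self.stack:
            raise IndexError("queue is empty")
        return self.stack.pop(0)     # dequeue from the front
    def __len__(self) -> int:
        return len(self.stack)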
| 24.431034 | 85 | 0.562456 |
0ad02fbe661ef723ec6b1d7108a2d41a85831a5b | 17,018 | py | Python | darknet2ncnn.py | nihui/gen-ncnn-models | 18523f1920d9afc44ce3058087c07e09f28aa151 | [
"BSD-2-Clause"
] | 4 | 2019-12-24T15:16:18.000Z | 2021-05-14T08:12:17.000Z | darknet2ncnn.py | nihui/gen-ncnn-models | 18523f1920d9afc44ce3058087c07e09f28aa151 | [
"BSD-2-Clause"
] | null | null | null | darknet2ncnn.py | nihui/gen-ncnn-models | 18523f1920d9afc44ce3058087c07e09f28aa151 | [
"BSD-2-Clause"
] | null | null | null | #! /usr/bin/env python
# coding: utf-8
import configparser
import numpy as np
import re,sys,os
from graph import MyGraph
from collections import OrderedDict
def unique_config_sections(config_file):
"""Convert all config sections to have unique names.
    Adds unique suffixes to config sections for compatibility with configparser.
"""
from collections import defaultdict
import io
section_counters = defaultdict(int)
output_stream = io.StringIO()
with open(config_file) as fin:
for line in fin:
if line.startswith('['):
section = line.strip().strip('[]')
_section = section + '_' + str(section_counters[section])
section_counters[section] += 1
line = line.replace(section, _section)
output_stream.write(line)
output_stream.seek(0)
return output_stream
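# Illustration (assumed example, not from the original file): a Darknet cfg with
# repeated section headers is rewritten so configparser can load it, e.g.
#   [net] -> [net_0], [convolutional] -> [convolutional_0],
#   [convolutional] -> [convolutional_1], [yolo] -> [yolo_0]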
if __name__ == '__main__':
config_path = sys.argv[1]
weights_path = sys.argv[2]
mygraph = buildGraph(config_path, weights_path)
#
outputNodes = ['region_0', 'softmax_0']
stopNodes = []
inputNodes = ['darknet_0']
mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes)
mygraph.generateDot('YoloV2.dot')
#
mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn', os.path.split(weights_path)[1] + '.ncnn')
| 36.915401 | 117 | 0.534317 |
0ad05115e0ac22a4083ac999a331c7da804f1e24 | 1,058 | py | Python | music/models.py | anirudha-bs/Django_music_app | 1b80bd4299a35fb707c32307dd115074a8ecba9f | [
"Apache-2.0"
] | null | null | null | music/models.py | anirudha-bs/Django_music_app | 1b80bd4299a35fb707c32307dd115074a8ecba9f | [
"Apache-2.0"
] | null | null | null | music/models.py | anirudha-bs/Django_music_app | 1b80bd4299a35fb707c32307dd115074a8ecba9f | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.models import Permission, User
from django.db import models
| 37.785714 | 74 | 0.73535 |
0ad16ca68b13c3255bfd62c00d84e6b8aa940002 | 3,021 | py | Python | finex_history.py | yihming/gdax-data | 7e562f314e9ef12eb6be2df3b97190af632c4530 | [
"MIT"
] | null | null | null | finex_history.py | yihming/gdax-data | 7e562f314e9ef12eb6be2df3b97190af632c4530 | [
"MIT"
] | null | null | null | finex_history.py | yihming/gdax-data | 7e562f314e9ef12eb6be2df3b97190af632c4530 | [
"MIT"
] | null | null | null | import datetime
import calendar
import requests
import pandas as pd
import json
import os.path
import time
import MySQLdb as M
from gdax_history import timestamp_to_utcstr
if __name__ == "__main__":
main()
| 30.21 | 207 | 0.581595 |
0ad19b186920402498e9734534abe48d50e505b7 | 2,154 | py | Python | src/producers/connector.py | cvelas31/public_transportation_streaming | 903a1a147645e1b0783555db4bfc02098f7941ae | [
"MIT"
] | null | null | null | src/producers/connector.py | cvelas31/public_transportation_streaming | 903a1a147645e1b0783555db4bfc02098f7941ae | [
"MIT"
] | null | null | null | src/producers/connector.py | cvelas31/public_transportation_streaming | 903a1a147645e1b0783555db4bfc02098f7941ae | [
"MIT"
] | null | null | null | """Configures a Kafka Connector for Postgres Station data"""
import json
import logging
import requests
from settings import Settings
logger = logging.getLogger(__name__)
KAFKA_CONNECT_URL = f"{Settings.URLs.KAFKA_CONNECT_URL}/connectors"
CONNECTOR_NAME = "stations"
def configure_connector():
"""Starts and configures the Kafka Connect connector"""
logging.debug("Creating or updating kafka connect connector...")
resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}")
if resp.status_code == 200:
logging.debug("Connector already created skipping recreation")
return
config = {
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
"key.converter": "org.apache.kafka.connect.json.JsonConverter",
"key.converter.schemas.enable": "false",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schemas.enable": "false",
"topic.prefix": "com.connect.transportation.",
"connection.url": "jdbc:postgresql://postgres:5432/cta",
"connection.user": "cta_admin",
"connection.password": "chicago",
"batch.max.rows": "500",
"table.whitelist": "stations",
"poll.interval.ms": "5000", # Poll every 5 seconds
"mode": "incrementing",
"incrementing.column.name": "stop_id",
}
    # NOTE: the config above fulfils the exercise directions: it uses the JDBC
    # Source Connector to connect to Postgres, loads the `stations` table in
    # incrementing mode with `stop_id` as the incrementing column, sets a topic
    # prefix, and runs the connector on a fixed poll interval (5 seconds here).
data = json.dumps({"name": CONNECTOR_NAME, "config": config})
resp = requests.post(
KAFKA_CONNECT_URL,
headers={"Content-Type": "application/json"},
data=data,
)
# Ensure a healthy response was given
resp.raise_for_status()
logging.info("-------Connector created successfully-------")
if __name__ == "__main__":
configure_connector()
| 35.311475 | 98 | 0.679201 |
0ad20a796d3e2e784e9676daf81a22cf86a1d3cb | 8,474 | py | Python | liuetal2019/utils.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | [
"MIT"
] | 24 | 2020-12-07T10:22:40.000Z | 2022-03-31T09:24:13.000Z | liuetal2019/utils.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | [
"MIT"
] | 15 | 2021-03-22T04:52:57.000Z | 2022-01-01T18:32:31.000Z | liuetal2019/utils.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | [
"MIT"
] | 8 | 2021-03-04T05:09:42.000Z | 2022-01-25T12:59:19.000Z | import io
import logging
import json
import numpy
import torch
import numpy as np
from tqdm import tqdm
from clie.inputters import constant
from clie.objects import Sentence
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Data loading
# ------------------------------------------------------------------------------
def vectorize(ex, model, iseval):
"""Torchify a single example."""
words = ['!{}_{}'.format(ex.language, w) for w in ex.words]
words = [model.word_dict[w] for w in words]
knn_word = None
if ex.knn_words:
knn_word = [[model.word_dict[w] for w in knn]
for knn in ex.knn_words]
knn_word = torch.LongTensor(knn_word)
word = torch.LongTensor(words)
pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos])
ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner])
deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel])
assert any([x == 0 for x in ex.head])
head = torch.LongTensor(ex.head)
subj_position = torch.LongTensor(ex.subj_position)
obj_position = torch.LongTensor(ex.obj_position)
type = [0] * len(ex.words)
ttype = model.type_dict[ex.subj_type]
start, end = ex.subject
type[start: end + 1] = [ttype] * (end - start + 1)
atype = model.type_dict[ex.obj_type]
start, end = ex.object
type[start: end + 1] = [atype] * (end - start + 1)
type = torch.LongTensor(type)
return {
'id': ex.id,
'language': ex.language,
'word': word,
'pos': pos,
'ner': ner,
'deprel': deprel,
'type': type,
'head': head,
'subject': ex.subj_text,
'object': ex.obj_text,
'subject_pos': subj_position,
'object_pos': obj_position,
'relation': model.label_dict[ex.relation],
'knn_word': knn_word
}
def batchify(batch):
"""Gather a batch of individual examples into one batch."""
# batch is a list of vectorized examples
batch_size = len(batch)
ids = [ex['id'] for ex in batch]
language = [ex['language'] for ex in batch]
use_knn = batch[0]['knn_word'] is not None
# NOTE. batch[0]['knn_word'] is a 2d list
knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0
# --------- Prepare Code tensors ---------
max_len = max([ex['word'].size(0) for ex in batch])
# Batch Code Representations
len_rep = torch.LongTensor(batch_size).fill_(constant.PAD)
word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
labels = torch.LongTensor(batch_size)
subject = []
object = []
knn_rep = None
if use_knn:
knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD)
for i, ex in enumerate(batch):
len_rep[i] = ex['word'].size(0)
labels[i] = ex['relation']
word_rep[i, :len_rep[i]] = ex['word']
head_rep[i, :len_rep[i]] = ex['head']
subject_pos_rep[i, :len_rep[i]] = ex['subject_pos']
object_pos_rep[i, :len_rep[i]] = ex['object_pos']
pos_rep[i, :len_rep[i]] = ex['pos']
ner_rep[i, :len_rep[i]] = ex['ner']
deprel_rep[i, :len_rep[i]] = ex['deprel']
type_rep[i, :len_rep[i]] = ex['type']
subject.append(ex['subject'])
object.append(ex['object'])
if use_knn:
knn_rep[i, :len_rep[i]] = ex['knn_word']
return {
'ids': ids,
'language': language,
'batch_size': batch_size,
'len_rep': len_rep,
'word_rep': word_rep,
'knn_rep': knn_rep,
'head_rep': head_rep,
'subject': subject,
'object': object,
'subject_pos_rep': subject_pos_rep,
'object_pos_rep': object_pos_rep,
'labels': labels,
'pos_rep': pos_rep,
'ner_rep': ner_rep,
'deprel_rep': deprel_rep,
'type_rep': type_rep
}
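# Minimal usage sketch (assumption, not from the original file): `vectorize` turns
# one Sentence into tensors and `batchify` is intended as the DataLoader collate
# function, e.g.
#   examples = [vectorize(ex, model, iseval=True) for ex in sentences]
#   loader = torch.utils.data.DataLoader(examples, batch_size=32, shuffle=True,
#                                        collate_fn=batchify)
#   for batch in loader:
#       ...  # feed batch['word_rep'], batch['labels'], etc. to the model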
| 34.587755 | 85 | 0.576941 |
0ad2503d07ac5b15fee30f7480f83b4ea51f1515 | 914 | py | Python | build.py | dnanexus/IndexTools | 0392b3be92ff50b401290b59e9ca6c7767fa5a96 | [
"MIT"
] | 15 | 2019-07-17T11:41:36.000Z | 2021-03-02T09:36:34.000Z | build.py | dnanexus/IndexTools | 0392b3be92ff50b401290b59e9ca6c7767fa5a96 | [
"MIT"
] | 22 | 2019-05-15T20:08:12.000Z | 2019-10-11T13:33:42.000Z | build.py | dnanexus/IndexTools | 0392b3be92ff50b401290b59e9ca6c7767fa5a96 | [
"MIT"
] | 3 | 2019-06-01T15:58:06.000Z | 2022-01-21T21:10:01.000Z | from distutils.extension import Extension
cmdclass = {}
try:
# with Cython
from Cython.Build import build_ext
cmdclass["build_ext"] = build_ext
module_src = "cgranges/python/cgranges.pyx"
except ImportError: # without Cython
module_src = "cgranges/python/cgranges.c"
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update(
{
"ext_modules": [
Extension(
"cgranges",
sources=[module_src, "cgranges/cgranges.c"],
depends=[
"cgranges/cgranges.h",
"cgranges/khash.h",
"cgranges/python/cgranges.pyx"
],
include_dirs=["cgranges"]
)
],
"cmdclass": cmdclass
}
)
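# Usage sketch (assumption about the packaging setup): the build backend imports
# this module and calls build() with the keyword arguments destined for setup():
#   setup_kwargs = {}
#   build(setup_kwargs)
#   # setup_kwargs now carries 'ext_modules' and 'cmdclass', compiling the cgranges
#   # extension with Cython when available, or from the pre-generated C file otherwise.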
| 25.388889 | 64 | 0.504376 |
0ad2792c4efbba79b47edb4a13bc47fda219fd40 | 48 | py | Python | icarus/models/service/__init__.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 5 | 2021-03-20T09:22:55.000Z | 2021-12-20T17:01:33.000Z | icarus/models/service/__init__.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 1 | 2021-12-13T07:40:46.000Z | 2021-12-20T16:59:08.000Z | icarus/models/service/__init__.py | oascigil/icarus_edge_comp | b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b | [
"MIT"
] | 1 | 2021-11-25T05:42:20.000Z | 2021-11-25T05:42:20.000Z | # -*- coding: utf-8 -*-
from .compSpot import *
| 16 | 23 | 0.583333 |
0ad2916f049d06f5df6ddbf5e08b57510f7c1b78 | 17,212 | py | Python | gluoncv/data/kinetics400/classification.py | YvetteGuo/gluon-cv | 123af8cf9f15a879c16a5c7d12f01ce1471d85b6 | [
"Apache-2.0"
] | 1 | 2019-04-02T02:08:04.000Z | 2019-04-02T02:08:04.000Z | gluoncv/data/kinetics400/classification.py | YvetteGuo/gluon-cv | 123af8cf9f15a879c16a5c7d12f01ce1471d85b6 | [
"Apache-2.0"
] | 1 | 2019-06-06T08:39:12.000Z | 2019-06-06T08:39:12.000Z | gluoncv/data/kinetics400/classification.py | YvetteGuo/gluon-cv | 123af8cf9f15a879c16a5c7d12f01ce1471d85b6 | [
"Apache-2.0"
] | 1 | 2019-08-26T09:26:42.000Z | 2019-08-26T09:26:42.000Z | # pylint: disable=line-too-long,too-many-lines,missing-docstring
"""Kinetics400 action classification dataset."""
import os
import random
import numpy as np
from mxnet import nd
from mxnet.gluon.data import dataset
__all__ = ['Kinetics400']
| 65.444867 | 152 | 0.625552 |
0ad33111935325f80d27dfada02fe97074254f24 | 2,206 | py | Python | qf_lib/containers/futures/future_contract.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 198 | 2019-08-16T15:09:23.000Z | 2022-03-30T12:44:00.000Z | qf_lib/containers/futures/future_contract.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 13 | 2021-01-07T10:15:19.000Z | 2022-03-29T13:01:47.000Z | qf_lib/containers/futures/future_contract.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 29 | 2019-08-16T15:21:28.000Z | 2022-02-23T09:53:49.000Z | # Copyright 2016-present CERN European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
| 38.701754 | 118 | 0.704442 |
0ad331ec8ece0975704ec9214918b2580008a6a0 | 23,842 | py | Python | watcher/api/controllers/v1/action_plan.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | 64 | 2015-10-18T02:57:24.000Z | 2022-01-13T11:27:51.000Z | watcher/api/controllers/v1/action_plan.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | null | null | null | watcher/api/controllers/v1/action_plan.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | 35 | 2015-12-25T13:53:21.000Z | 2021-07-19T15:50:16.000Z | # -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :ref:`Action Plan <action_plan_definition>` specifies a flow of
:ref:`Actions <action_definition>` that should be executed in order to satisfy
a given :ref:`Goal <goal_definition>`. It also contains an estimated
:ref:`global efficacy <efficacy_definition>` alongside a set of
:ref:`efficacy indicators <efficacy_indicator_definition>`.
An :ref:`Action Plan <action_plan_definition>` is generated by Watcher when an
:ref:`Audit <audit_definition>` is successful which implies that the
:ref:`Strategy <strategy_definition>`
which was used has found a :ref:`Solution <solution_definition>` to achieve the
:ref:`Goal <goal_definition>` of this :ref:`Audit <audit_definition>`.
In the default implementation of Watcher, an action plan is composed of
a list of successive :ref:`Actions <action_definition>` (i.e., a Workflow of
:ref:`Actions <action_definition>` belonging to a unique branch).
However, Watcher provides abstract interfaces for many of its components,
allowing other implementations to generate and handle more complex :ref:`Action
Plan(s) <action_plan_definition>` composed of two types of Action Item(s):
- simple :ref:`Actions <action_definition>`: atomic tasks, which means it
can not be split into smaller tasks or commands from an OpenStack point of
view.
- composite Actions: which are composed of several simple
:ref:`Actions <action_definition>`
ordered in sequential and/or parallel flows.
An :ref:`Action Plan <action_plan_definition>` may be described using
standard workflow model description formats such as
`Business Process Model and Notation 2.0 (BPMN 2.0)
<http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling Language (UML)
<http://www.uml.org/>`_.
To see the life-cycle and description of
:ref:`Action Plan <action_plan_definition>` states, visit :ref:`the Action Plan
state machine <action_plan_state_machine>`.
"""
import datetime
from http import HTTPStatus
from oslo_log import log
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher._i18n import _
from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.applier import rpcapi
from watcher.common import exception
from watcher.common import policy
from watcher.common import utils
from watcher import objects
from watcher.objects import action_plan as ap_objects
LOG = log.getLogger(__name__)
def hide_fields_in_newer_versions(obj):
"""This method hides fields that were added in newer API versions.
Certain node fields were introduced at certain API versions.
These fields are only made available when the request's API version
matches or exceeds the versions when these fields were introduced.
"""
pass
| 39.149425 | 79 | 0.637279 |
0ad4a301cbaa49708e90318cda5d0db992bcc1f1 | 354 | py | Python | controllers/albums.py | jeonginlee/groove_scheduler | 84e61834e940e2ff138ffeeea61fd301f3c2a244 | [
"MIT"
] | null | null | null | controllers/albums.py | jeonginlee/groove_scheduler | 84e61834e940e2ff138ffeeea61fd301f3c2a244 | [
"MIT"
] | null | null | null | controllers/albums.py | jeonginlee/groove_scheduler | 84e61834e940e2ff138ffeeea61fd301f3c2a244 | [
"MIT"
] | null | null | null | from flask import *
albums = Blueprint('albums', __name__, template_folder='templates')
| 19.666667 | 67 | 0.700565 |
0ad4ca562029351bba499bd795e4d3faca8ffc96 | 3,113 | py | Python | Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
from dfirtrack_main.forms import DivisionForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Division
| 40.428571 | 85 | 0.697719 |
0ad57f93e09c3cfa475ee8a3a4f941a9c684524d | 1,613 | py | Python | run.py | shark803/Torch_serve_example_NLP | 7f7984a1668f21aced3a7a1e8ddac3c8e0ff0105 | [
"MIT"
] | 1 | 2021-11-19T07:59:58.000Z | 2021-11-19T07:59:58.000Z | run.py | shark803/Torch_serve_example_NLP | 7f7984a1668f21aced3a7a1e8ddac3c8e0ff0105 | [
"MIT"
] | null | null | null | run.py | shark803/Torch_serve_example_NLP | 7f7984a1668f21aced3a7a1e8ddac3c8e0ff0105 | [
"MIT"
] | null | null | null | # coding: UTF-8
import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse
parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN')
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
args = parser.parse_args()
if __name__ == '__main__':
    dataset = 'THUCNews'  # dataset root directory
    # pre-trained embeddings: embedding_SougouNews.npz (Sogou News) or embedding_Tencent.npz (Tencent); use 'random' for random initialization
    # embedding = 'random'
model_name = args.model # TextCNN
from utils import build_dataset, build_iterator, get_time_dif
x = import_module('models.' + model_name)
from config import Config
config = Config(dataset)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # make cuDNN deterministic so results are reproducible
start_time = time.time()
print("Loading data...")
vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
train_iter = build_iterator(train_data, config)
dev_iter = build_iterator(dev_data, config)
test_iter = build_iterator(test_data, config)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
# train
config.n_vocab = len(vocab)
model = x.Model().to(config.device)
init_network(model)
print(model.parameters)
train(config, model, train_iter, dev_iter, test_iter)
| 32.918367 | 97 | 0.726596 |
0ad5ae1fbe9b6f2bb0a59a7bd762d3ef2ea1b7ed | 22,315 | py | Python | src/tests/cfp/views/test_cfp_user.py | xhub/pretalx | 33bd07ec98ddeb5b7ff35fe7e30c4d38bef57d7e | [
"Apache-2.0"
] | null | null | null | src/tests/cfp/views/test_cfp_user.py | xhub/pretalx | 33bd07ec98ddeb5b7ff35fe7e30c4d38bef57d7e | [
"Apache-2.0"
] | null | null | null | src/tests/cfp/views/test_cfp_user.py | xhub/pretalx | 33bd07ec98ddeb5b7ff35fe7e30c4d38bef57d7e | [
"Apache-2.0"
] | null | null | null | import pytest
from django.conf import settings
from django.core import mail as djmail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django_scopes import scope
from rest_framework.authtoken.models import Token
from pretalx.submission.models import SubmissionStates
| 36.945364 | 92 | 0.6946 |
0ad630d29820371f228b1287947197de5ede3fb0 | 5,954 | py | Python | tests/mb_util.py | vasilydenisenko/modbus_rtu_slave | 8a531b776ab82c60b5d335f0565468f19a7801f5 | [
"MIT"
] | null | null | null | tests/mb_util.py | vasilydenisenko/modbus_rtu_slave | 8a531b776ab82c60b5d335f0565468f19a7801f5 | [
"MIT"
] | null | null | null | tests/mb_util.py | vasilydenisenko/modbus_rtu_slave | 8a531b776ab82c60b5d335f0565468f19a7801f5 | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2021 Vasily Denisenko, Sergey Kuznetsov
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mb_bsp
PDU_SIZE_REG = 0
CONFIG_REG = 1
SLAVE_ADDR_REG = 2
CS_REG = 3
MB_MAX_WRITE_REGNUM = 123
MB_MAX_READ_REGNUM = 125
MB_MAX_REG_ADDR = 65535
MB_MAX_REG_VAL = 65535
MB_MAX_SLAVE_ADDR = 247
MB_MIN_SLAVE_ADDR = 1
MB_MAX_PDU_SIZE = 253
MB_MIN_PDU_SIZE = 1
FCODE_0x3 = 0x3
FCODE_0x6 = 0x6
FCODE_0x10 = 0x10
def incr_err_count():
    # Minimal reconstruction (the original definition is not shown above):
    # counts detected errors on a function attribute.
    incr_err_count.count += 1
setattr(incr_err_count, 'count', 0)
| 25.553648 | 81 | 0.673161 |
0ad6db55250893c680ef209759e33e069cabdd9a | 4,292 | py | Python | modules/stackoverflow/models.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | 1 | 2017-04-26T18:51:43.000Z | 2017-04-26T18:51:43.000Z | modules/stackoverflow/models.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | null | null | null | modules/stackoverflow/models.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | null | null | null | import fix_path
import json
import datetime
from google.appengine.ext import ndb
# Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript
dthandler = lambda obj: (
obj.isoformat()
if isinstance(obj, datetime.datetime)
or isinstance(obj, datetime.date)
else None
) | 40.490566 | 98 | 0.682199 |
0ad7c645c6d3067f3c0c435d4f3782feef6cf400 | 218 | py | Python | src/main/java/com/bailei/study/beautyOfCoding/cpu50.py | sonymoon/algorithm | cc2a9e0125fc64bdbf6549034bad6482d2027ea2 | [
"Apache-2.0"
] | null | null | null | src/main/java/com/bailei/study/beautyOfCoding/cpu50.py | sonymoon/algorithm | cc2a9e0125fc64bdbf6549034bad6482d2027ea2 | [
"Apache-2.0"
] | null | null | null | src/main/java/com/bailei/study/beautyOfCoding/cpu50.py | sonymoon/algorithm | cc2a9e0125fc64bdbf6549034bad6482d2027ea2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import time
busyTime = 10          # milliseconds of busy work per cycle
idleTime = busyTime    # equal idle time, so the duty cycle is ~50%
while True:
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    while time.perf_counter() - start < busyTime / 1000:
        pass
    time.sleep(idleTime / 1000)
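# Why this yields ~50% load (illustrative note): each cycle spends busyTime ms
# spinning and idleTime ms sleeping, so utilisation on the core running this
# script is roughly busyTime / (busyTime + idleTime).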
| 14.533333 | 42 | 0.614679 |
0ad85408ba998c356a370a0f1582159d01f77a69 | 8,390 | py | Python | carto/maps.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | [
"BSD-3-Clause"
] | 85 | 2016-08-07T16:46:58.000Z | 2022-03-23T01:44:02.000Z | carto/maps.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | [
"BSD-3-Clause"
] | 109 | 2016-08-02T18:40:04.000Z | 2021-08-23T08:08:02.000Z | carto/maps.py | danicarrion/carto-python | 631b018f065960baa35473e2087ce598560b9e17 | [
"BSD-3-Clause"
] | 29 | 2016-11-29T03:42:47.000Z | 2022-01-23T17:37:11.000Z | """
Module for working with named and anonymous maps
.. module:: carto.maps
:platform: Unix, Windows
:synopsis: Module for working with named and anonymous maps
.. moduleauthor:: Daniel Carrion <[email protected]>
.. moduleauthor:: Alberto Romeu <[email protected]>
"""
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from pyrestcli.resources import Manager, Resource
from .exceptions import CartoException, CartoRateLimitException
API_VERSION = "v1"
NAMED_API_ENDPOINT = "api/{api_version}/map/named/"
ANONYMOUS_API_ENDPOINT = "api/{api_version}/map/"
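# Illustration (not in the original module): the endpoint templates are filled with
# the API version, e.g. NAMED_API_ENDPOINT.format(api_version=API_VERSION) yields
# "api/v1/map/named/".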
| 33.293651 | 86 | 0.555662 |
0ad87d7268affd7dfe13527d4d842a2d43c681ac | 157 | py | Python | client_driver.py | tlagore/kv_store | e3f139eabaa14d0e001193e21baf7e5c96e0358d | [
"MIT"
] | null | null | null | client_driver.py | tlagore/kv_store | e3f139eabaa14d0e001193e21baf7e5c96e0358d | [
"MIT"
] | null | null | null | client_driver.py | tlagore/kv_store | e3f139eabaa14d0e001193e21baf7e5c96e0358d | [
"MIT"
] | null | null | null | from kv_client.kv_client import KVClient
if __name__ == "__main__":
main() | 19.625 | 44 | 0.656051 |
0ad8ce46348b78515a8db8b2c9bc54898f1ab6f9 | 1,208 | py | Python | pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 206 | 2020-11-28T22:56:38.000Z | 2022-03-27T02:33:04.000Z | pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 19 | 2020-12-09T23:13:14.000Z | 2022-01-24T23:24:08.000Z | pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 28 | 2020-11-29T15:25:12.000Z | 2022-01-20T02:16:27.000Z | import operator_benchmark as op_bench
import torch
import numpy
from . import configs
"""EmbeddingBag Operator Benchmark"""
op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| 38.967742 | 107 | 0.724338 |
0ad9fee81c50ef01672c1f7b553d66bc07bc9155 | 3,972 | py | Python | python/dgl/geometry/capi.py | lfchener/dgl | 77f4287a4118db64c46f4f413a426e1419a09d53 | [
"Apache-2.0"
] | 9,516 | 2018-12-08T22:11:31.000Z | 2022-03-31T13:04:33.000Z | python/dgl/geometry/capi.py | lfchener/dgl | 77f4287a4118db64c46f4f413a426e1419a09d53 | [
"Apache-2.0"
] | 2,494 | 2018-12-08T22:43:00.000Z | 2022-03-31T21:16:27.000Z | python/dgl/geometry/capi.py | lfchener/dgl | 77f4287a4118db64c46f4f413a426e1419a09d53 | [
"Apache-2.0"
] | 2,529 | 2018-12-08T22:56:14.000Z | 2022-03-31T13:07:41.000Z | """Python interfaces to DGL farthest point sampler."""
from dgl._ffi.base import DGLError
import numpy as np
from .._ffi.function import _init_api
from .. import backend as F
from .. import ndarray as nd
def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result):
r"""Farthest Point Sampler
Parameters
----------
data : tensor
A tensor of shape (N, d) where N is the number of points and d is the dimension.
batch_size : int
The number of batches in the ``data``. N should be divisible by batch_size.
sample_points : int
The number of points to sample in each batch.
dist : tensor
Pre-allocated tensor of shape (N, ) for to-sample distance.
start_idx : tensor of int
Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch.
result : tensor of int
Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index.
Returns
-------
No return value. The input variable ``result`` will be overwriten with sampled indices.
"""
assert F.shape(data)[0] >= sample_points * batch_size
assert F.shape(data)[0] % batch_size == 0
_CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data),
batch_size, sample_points,
F.zerocopy_to_dgl_ndarray(dist),
F.zerocopy_to_dgl_ndarray(start_idx),
F.zerocopy_to_dgl_ndarray(result))
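# Shape sketch (illustrative; assumes a PyTorch backend for F): for `data` of shape
# (N, d) holding batch_size point clouds of N // batch_size points each, pre-allocate
#   dist = torch.zeros(N)
#   start_idx = torch.randint(0, N // batch_size, (batch_size,), dtype=torch.long)
#   result = torch.zeros(sample_points * batch_size, dtype=torch.long)
# then call _farthest_point_sampler(data, batch_size, sample_points, dist,
# start_idx, result); `result` is overwritten with the sampled point indices.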
def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True):
"""
Description
-----------
The neighbor matching procedure of edge coarsening used in
`Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__
and
`Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__
for homogeneous graph coarsening. This procedure keeps picking an unmarked
    vertex and matching it with one of its unmarked neighbors (the one that maximizes its
    edge weight) until no more matches can be made.
    If no edge weight is given, this procedure will randomly pick a neighbor for each
vertex.
The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching
<http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__
NOTE: The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected`
if you are not sure your graph is bi-directed.
Parameters
----------
    graph_idx : HeteroGraphIndex
The input homogeneous graph.
num_nodes : int
The number of nodes in this homogeneous graph.
    edge_weights : tensor, optional
The edge weight tensor holding non-negative scalar weight for each edge.
default: :obj:`None`
relabel_idx : bool, optional
If true, relabel resulting node labels to have consecutive node ids.
default: :obj:`True`
Returns
-------
a 1-D tensor
A vector with each element that indicates the cluster ID of a vertex.
"""
edge_weight_capi = nd.NULL["int64"]
if edge_weights is not None:
edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights)
node_label = F.full_1d(
num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx))
node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label)
_CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi)
if F.reduce_sum(node_label < 0).item() != 0:
raise DGLError("Find unmatched node")
# reorder node id
# TODO: actually we can add `return_inverse` option for `unique`
# function in backend for efficiency.
if relabel_idx:
node_label_np = F.zerocopy_to_numpy(node_label)
_, node_label_np = np.unique(node_label_np, return_inverse=True)
return F.tensor(node_label_np)
else:
return node_label
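# Example outcome (illustrative): on a bi-directed 4-node path graph 0-1-2-3 with
# equal edge weights, one valid matching pairs {0, 1} and {2, 3}; with
# relabel_idx=True the returned per-vertex cluster labels would then be e.g.
# [0, 0, 1, 1].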
_init_api('dgl.geometry', __name__)
| 38.563107 | 95 | 0.680514 |
0adab04d82e555974b5ee3aecff08feca7c75415 | 6,478 | py | Python | scidb/core/data.py | oxdc/sci.db | 0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb | [
"MIT"
] | null | null | null | scidb/core/data.py | oxdc/sci.db | 0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb | [
"MIT"
] | null | null | null | scidb/core/data.py | oxdc/sci.db | 0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb | [
"MIT"
] | null | null | null | import shutil
import hashlib
from pathlib import Path
from typing import TextIO, BinaryIO, IO, Union
from datetime import datetime
from os.path import getmtime
from .low import ObservableDict
def rename(self, new_name: str):
shutil.move(str(self.path), str(self.__parent__.path / new_name))
self.__data_name__ = new_name
def reader(self, binary: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]:
mode = 'r'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def creator(self,
binary: bool = False,
confirm: bool = False,
feedback: bool = False,
**kwargs) -> [IO, BinaryIO, TextIO, None]:
if confirm and not feedback:
return None
mode = 'x'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def writer(self,
binary: bool = False,
append: bool = True,
allow_overwrite: bool = False,
confirm: bool = True,
feedback: bool = False,
**kwargs) -> [IO, BinaryIO, TextIO, None]:
if not allow_overwrite and not append:
            raise PermissionError('Trying to overwrite existing data.')
if confirm and not feedback:
return
mode = 'a' if append else 'w'
mode += 'b' if binary else ''
return open(str(self.path), mode=mode, **kwargs)
def __repr__(self):
return f"Data('{self.__data_name__}')"
def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False):
if self.path.exists() and not allow_overwrite:
return
if confirm and not feedback:
return
shutil.copyfile(str(src_path), str(self.path))
def export_file(self, dst_path: [str, Path], allow_overwrite=False):
if Path(dst_path).exists() and not allow_overwrite:
return
shutil.copyfile(str(self.path), str(dst_path))
def __calc_hash__(self, h, buffer_size: int = 131072):
if not self.path.exists():
return None
with open(str(self.path), 'rb') as file_reader:
while True:
data = file_reader.read(buffer_size)
if not data:
break
h.update(data)
return h.hexdigest()
def md5(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'md5' not in self.metadata \
or 'md5-timestamp' not in self.metadata \
or self.metadata['md5-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.md5(), buffer_size)
self.metadata['md5'] = result
self.metadata['md5-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['md5']
def sha1(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'sha1' not in self.metadata \
or 'sha1-timestamp' not in self.metadata \
or self.metadata['sha1-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.sha1(), buffer_size)
self.metadata['sha1'] = result
self.metadata['sha1-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['sha1']
def sha256(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
if not self.path.exists():
return None
last_modified_time = getmtime(str(self.path))
if require_update \
or 'sha256' not in self.metadata \
or 'sha256-timestamp' not in self.metadata \
or self.metadata['sha256-timestamp'] < last_modified_time:
result = self.__calc_hash__(hashlib.sha256(), buffer_size)
self.metadata['sha256'] = result
self.metadata['sha256-timestamp'] = datetime.now().timestamp()
return result
else:
return self.metadata['sha256']
| 35.988889 | 102 | 0.599105 |
0adacd25859bed18399a4d523ba68cd8adb2bc90 | 39,932 | py | Python | tensorflow/python/keras/optimizer_v2/optimizer_v2.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 9 | 2019-12-29T01:47:37.000Z | 2021-12-21T13:47:41.000Z | tensorflow/python/keras/optimizer_v2/optimizer_v2.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/optimizer_v2/optimizer_v2.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 1 | 2020-05-28T11:22:49.000Z | 2020-05-28T11:22:49.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
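# Worked example (illustrative): for values = [[1.], [2.], [3.]] and indices = [0, 2, 0],
# unique_indices = [0, 2] and summed_values = [[4.], [2.]], i.e. the two slices that
# both target row 0 are summed into a single slice before the sparse update is applied.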
def _filter_grads(grads_and_vars):
"""Filter out iterable with grad equal to None."""
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([v.name for _, v in grads_and_vars],))
if vars_with_empty_grads:
logging.warning(
("Gradients does not exist for variables %s when minimizing the loss."),
([v.name for v in vars_with_empty_grads]))
return filtered
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if getattr(var, "_distributed_container", None) is not None:
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
def _get_slot_key_from_var(var, slot_name):
"""Get the slot key for the variable: var_name/slot_name."""
name = _var_key(var)
return name + "/" + slot_name
revived_types.register_revived_type(
"optimizer",
lambda obj: isinstance(obj, OptimizerV2),
versions=[revived_types.VersionedTypeRegistration(
object_factory=lambda proto: _RestoredOptimizer(),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=_RestoredOptimizer._set_hyper # pylint: disable=protected-access
)])
| 38.806608 | 101 | 0.698162 |
0adb9e87674ba38043bf368fb738d4c5e8ba7c5c | 362 | py | Python | escola/teste_get.py | danielrosendos/djangoRestFramework | 946bb95b8dd9976d1920302ce724572ffd9f98cf | [
"MIT"
] | 2 | 2020-07-26T15:17:23.000Z | 2020-07-26T16:50:18.000Z | escola/teste_get.py | sport129/djangoRestFramework | 946bb95b8dd9976d1920302ce724572ffd9f98cf | [
"MIT"
] | 3 | 2021-03-30T14:12:18.000Z | 2021-06-04T23:44:47.000Z | escola/teste_get.py | sport129/djangoRestFramework | 946bb95b8dd9976d1920302ce724572ffd9f98cf | [
"MIT"
] | null | null | null | import requests
headers = {
'content-type': 'application/json',
'Authorization': 'Token 80ca9f249b80e7226cdc7fcaada8d7297352f0f9'
}
url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos'
url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes'
resultado = requests.get(url=url_base_cursos, headers=headers)
assert resultado.status_code == 200 | 27.846154 | 69 | 0.756906 |
0adc55ed2f06787ab63a1224266a2dd707ce1b10 | 6,455 | py | Python | python/avi/sdk/utils/waf_policy/vdi_waf_policy.py | aaronjwood/alb-sdk | ae4c47b2228651d3f5095e7c14f081aa4adbb732 | [
"Apache-2.0"
] | null | null | null | python/avi/sdk/utils/waf_policy/vdi_waf_policy.py | aaronjwood/alb-sdk | ae4c47b2228651d3f5095e7c14f081aa4adbb732 | [
"Apache-2.0"
] | null | null | null | python/avi/sdk/utils/waf_policy/vdi_waf_policy.py | aaronjwood/alb-sdk | ae4c47b2228651d3f5095e7c14f081aa4adbb732 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 VMware, Inc.
import argparse
import json
import re
import logging
import os
import sys
from avi.sdk.avi_api import ApiSession
API_VERSION = "18.2.13"
SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI'
logger = logging.getLogger(__name__)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user', action="store", help='controller user',
default='admin')
parser.add_argument('-p', '--password', action="store", help='controller user password',
default='admin')
parser.add_argument('-t', '--tenant', action="store", help='tenant name',
default='admin')
parser.add_argument('-a', '--authtoken', help='Authentication token')
parser.add_argument('-c', '--controller_ip', action="store", help='controller ip')
args = parser.parse_args()
if args.password:
api = ApiSession.get_session(args.controller_ip, args.user, args.password,
tenant=args.tenant, api_version=API_VERSION)
elif args.authtoken:
api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant,
token=args.authtoken, api_version=API_VERSION)
else:
logging.error("Either password or authtokentoken must be provided.")
sys.exit(1)
waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI)
if not waf_policy_obj:
create_vdi_waf_policy(api, args)
else:
update_waf_policy(api, args, waf_policy_obj)
| 38.652695 | 219 | 0.632223 |
0adcde8b96a5cb82b17bdf29ba072f1b54339883 | 4,101 | py | Python | src/api/bkuser_core/tests/bkiam/test_constants.py | Chace-wang/bk-user | 057f270d66a1834312306c9fba1f4e95521f10b1 | [
"MIT"
] | null | null | null | src/api/bkuser_core/tests/bkiam/test_constants.py | Chace-wang/bk-user | 057f270d66a1834312306c9fba1f4e95521f10b1 | [
"MIT"
] | null | null | null | src/api/bkuser_core/tests/bkiam/test_constants.py | Chace-wang/bk-user | 057f270d66a1834312306c9fba1f4e95521f10b1 | [
"MIT"
] | 1 | 2021-12-31T06:48:41.000Z | 2021-12-31T06:48:41.000Z | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making -(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from bkuser_core.bkiam.constants import ResourceType
from bkuser_core.categories.models import Department, ProfileCategory
from bkuser_core.tests.utils import make_simple_department
pytestmark = pytest.mark.django_db
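# Editor's note (not in the original source): this module-level marker gives every test in
# the file database access, which the ORM queries below rely on.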
def test_get_resource_nodes_other(self):
pc = ProfileCategory.objects.get_default()
nodes = ResourceType.get_instance_resource_nodes(pc)
assert [(x["type"], x["name"]) for x in nodes] == [("category", "")]
| 39.432692 | 115 | 0.613997 |
0add3254851b32ab4bc7e1c39aca7cbe53d6398b | 190 | py | Python | votesim/benchmarks/__init__.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 8 | 2019-10-21T23:24:51.000Z | 2021-09-14T03:04:59.000Z | votesim/benchmarks/__init__.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 2 | 2021-02-09T23:52:47.000Z | 2021-02-10T04:08:35.000Z | votesim/benchmarks/__init__.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 1 | 2019-10-21T23:32:18.000Z | 2019-10-21T23:32:18.000Z | # from votesim.benchmarks.benchrunner import (
# run_benchmark,
# get_benchmarks,
# post_benchmark,
# plot_benchmark,
# )
from votesim.benchmarks import runtools, simple | 23.75 | 47 | 0.705263 |
0add5b092c6c665d2b618a20a05d4cd299d00402 | 1,948 | py | Python | src/handler.py | MrIgumnov96/ETL-CloudDeployment | 666b85a9350460fba49f82ec90f5cddc0bdd0235 | [
"Unlicense"
] | null | null | null | src/handler.py | MrIgumnov96/ETL-CloudDeployment | 666b85a9350460fba49f82ec90f5cddc0bdd0235 | [
"Unlicense"
] | null | null | null | src/handler.py | MrIgumnov96/ETL-CloudDeployment | 666b85a9350460fba49f82ec90f5cddc0bdd0235 | [
"Unlicense"
] | null | null | null | import boto3
import src.app as app
import csv
import psycopg2 as ps
import os
from dotenv import load_dotenv
load_dotenv()
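# Editor's note (not in the original source): load_dotenv() reads a local .env file into the
# process environment, so the os.environ lookups below expect keys named db, host, port,
# user and pass to be defined there.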
dbname = os.environ["db"]
host = os.environ["host"]
port = os.environ["port"]
user = os.environ["user"]
password = os.environ["pass"]
connection = ps.connect(dbname=dbname,
host=host,
port=port,
user=user,
password=password) | 31.419355 | 185 | 0.587269 |
0adf4b5bea842a306db59cff9711a1e6a19b7ae0 | 3,753 | py | Python | improver_tests/precipitation_type/test_utilities.py | cpelley/improver | ebf77fe2adc85ed7aec74c26671872a2e4388ded | [
"BSD-3-Clause"
] | 77 | 2017-04-26T07:47:40.000Z | 2022-03-31T09:40:49.000Z | improver_tests/precipitation_type/test_utilities.py | cpelley/improver | ebf77fe2adc85ed7aec74c26671872a2e4388ded | [
"BSD-3-Clause"
] | 1,440 | 2017-03-29T10:04:15.000Z | 2022-03-28T10:11:29.000Z | improver_tests/precipitation_type/test_utilities.py | MoseleyS/improver | ca028e3a1c842e3ff00b188c8ea6eaedd0a07149 | [
"BSD-3-Clause"
] | 72 | 2017-03-17T16:53:45.000Z | 2022-02-16T09:41:37.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Tests of precipitation_type utilities"""
import numpy as np
import pytest
from iris.exceptions import CoordinateNotFoundError
from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.utilities import make_shower_condition_cube
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
def set_up_test_cube(n_thresholds=1):
"""Set up a cube testing shower condition conversion"""
thresholds = np.arange(n_thresholds)
shape = [2, 2]
shape = [n_thresholds, *shape] if n_thresholds > 0 else shape
data = np.ones(shape, dtype=FLOAT_DTYPE)
cube = set_up_probability_cube(
data,
thresholds,
variable_name="texture_of_cloud_area_fraction",
threshold_units=1,
spatial_grid="equalarea",
)
return cube
def test_basic():
"""Test that with a valid input the cube is transformed into a shower
condition cube."""
cube = set_up_test_cube()
result = make_shower_condition_cube(cube)
threshold_coord = result.coord(var_name="threshold")
assert result.name() == "probability_of_shower_condition_above_threshold"
assert result.dtype == FLOAT_DTYPE
assert (result.data == cube.data).all()
assert threshold_coord.name() == "shower_condition"
assert threshold_coord.units == 1
def test_no_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube does not have
a threshold coordinate."""
cube = set_up_test_cube()
cube.remove_coord("texture_of_cloud_area_fraction")
expected = "Input has no threshold coordinate and cannot be used"
with pytest.raises(CoordinateNotFoundError, match=expected):
make_shower_condition_cube(cube)
def test_multi_valued_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube has a multi
valued threshold coordinate."""
cube = set_up_test_cube(n_thresholds=2)
expected = "Expected a single valued threshold coordinate.*"
with pytest.raises(ValueError, match=expected):
make_shower_condition_cube(cube)
| 39.925532 | 79 | 0.742073 |
0ae04a483b4283bc6fdc84bd651d77ab70b6120c | 5,149 | py | Python | app/api/v1/models/items.py | bryan-munene/Store-Manager-DB | 40b24039189aea6854d7fcf33ccb648bb6642231 | [
"MIT"
] | null | null | null | app/api/v1/models/items.py | bryan-munene/Store-Manager-DB | 40b24039189aea6854d7fcf33ccb648bb6642231 | [
"MIT"
] | 4 | 2018-10-25T00:57:18.000Z | 2018-10-25T21:29:09.000Z | app/api/v1/models/items.py | bryan-munene/Store-Manager-DB | 40b24039189aea6854d7fcf33ccb648bb6642231 | [
"MIT"
] | null | null | null | from .db_conn import ModelSetup
| 31.206061 | 112 | 0.543212 |
0ae122f08d00736fbd1d09356f366ff9dcd6baf8 | 4,215 | py | Python | site/src/sphinx/_extensions/api.py | linxGnu/armeria | 7f4b10e66acc377dd16929157aeb417b729ce55a | [
"Apache-2.0"
] | null | null | null | site/src/sphinx/_extensions/api.py | linxGnu/armeria | 7f4b10e66acc377dd16929157aeb417b729ce55a | [
"Apache-2.0"
] | null | null | null | site/src/sphinx/_extensions/api.py | linxGnu/armeria | 7f4b10e66acc377dd16929157aeb417b729ce55a | [
"Apache-2.0"
] | null | null | null | from docutils.parsers.rst.roles import register_canonical_role, set_classes
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.writers.html import HTMLTranslator
from sphinx.errors import ExtensionError
import os
import re
| 37.633929 | 103 | 0.629656 |
0ae19706ac78f27bbbf84e3668bc38423a4a2fcd | 739 | py | Python | feaas/runners/__init__.py | tsuru/varnishapi | d63a8c8c5f9c837855509fc5af59d8213c1c91d6 | [
"BSD-3-Clause"
] | 3 | 2015-05-04T03:20:09.000Z | 2016-02-19T10:35:35.000Z | feaas/runners/__init__.py | tsuru/varnishapi | d63a8c8c5f9c837855509fc5af59d8213c1c91d6 | [
"BSD-3-Clause"
] | 3 | 2015-01-02T13:18:56.000Z | 2021-02-08T20:17:14.000Z | feaas/runners/__init__.py | tsuru/varnishapi | d63a8c8c5f9c837855509fc5af59d8213c1c91d6 | [
"BSD-3-Clause"
] | 5 | 2015-01-02T13:11:45.000Z | 2016-08-26T06:14:35.000Z | # Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import time
from feaas import storage
| 24.633333 | 57 | 0.649526 |
0ae22c03054218a911ddc84125341497677c75ac | 2,045 | py | Python | ros_buildfarm/debian_repo.py | j-rivero/ros_buildfarm | 840d2dc1dd5db00d5407da4644cd2bcbc5e0ac88 | [
"Apache-2.0"
] | null | null | null | ros_buildfarm/debian_repo.py | j-rivero/ros_buildfarm | 840d2dc1dd5db00d5407da4644cd2bcbc5e0ac88 | [
"Apache-2.0"
] | 1 | 2019-12-12T21:08:01.000Z | 2019-12-12T21:08:01.000Z | ros_buildfarm/debian_repo.py | j-rivero/ros_buildfarm | 840d2dc1dd5db00d5407da4644cd2bcbc5e0ac88 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .common import PlatformPackageDescriptor
from .http_cache import fetch_and_cache_gzip
| 36.517857 | 91 | 0.695355 |
0ae277577c0d9cf0180a37747d11d8dcd292baa5 | 57 | py | Python | player.py | Drayux/Battlematus | 1709a15b58d9274b99ec36eff1a181014d155037 | [
"MIT"
] | null | null | null | player.py | Drayux/Battlematus | 1709a15b58d9274b99ec36eff1a181014d155037 | [
"MIT"
] | null | null | null | player.py | Drayux/Battlematus | 1709a15b58d9274b99ec36eff1a181014d155037 | [
"MIT"
] | null | null | null | # PLAYER
| 9.5 | 23 | 0.561404 |
0ae2b8b9a2e89b056cf58f74862944546c4ef4a9 | 48,440 | py | Python | Framwork-Backpropagation/utils/utils_v2.py | ConvolutedDog/Implicit-Im2col-for-Backpropagation | 529a62f52903326b9289091b7d0abb45e6c7bb31 | [
"Apache-2.0"
] | null | null | null | Framwork-Backpropagation/utils/utils_v2.py | ConvolutedDog/Implicit-Im2col-for-Backpropagation | 529a62f52903326b9289091b7d0abb45e6c7bb31 | [
"Apache-2.0"
] | null | null | null | Framwork-Backpropagation/utils/utils_v2.py | ConvolutedDog/Implicit-Im2col-for-Backpropagation | 529a62f52903326b9289091b7d0abb45e6c7bb31 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 ConvolutedDog (https://github.com/ConvolutedDog/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
import torch
import torch.nn as nn
import torch.nn.functional as F
from graphviz import Digraph, render
from torch.autograd import Variable
def add_backward(dLoss_dnextz):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
dLoss_dz = dLoss_dnextz
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
def generate_g(model, x):
delete_allpths(pth_dir=None)
print('\n=========================== Store network model Results Start =========================')
y = model(x)
print('=========================== Store network model Results End ===========================\n')
if 'GoogLeNet' in str(model).split('\n')[0]:
g = make_dot(y[0])
return g
else:
g = make_dot(y)
return g
def gradient_backward_v2(model, img, label, num_class=1000, g_view=False):
x = Variable(img)
g = generate_g(model, x)
if g_view:
g.view()
delete_allpths(pth_dir=None)
print('\n=========================== Generate Tensors Start ====================================')
result = model(img)
print('=========================== Generate Tensors End ======================================\n')
Loss = nn.CrossEntropyLoss()
if 'GoogLeNet' in str(model).split('\n')[0]:
loss_torch = Loss(result[0], label)
else:
loss_torch = Loss(result, label)
_, connections = generate_connections(g)
last_connections = merge_connections(connections)
return_layers = get_layers(last_connections, model)
return_tensors = get_tensors(last_connections)
parameters, fc_conv_weights = get_structure_parameters(return_layers)
'''
print('================')
for i in range(len(last_connections)):
print(i, last_connections[i])
print('================')
print('================')
for i in range(len(return_layers)):
print(i, return_layers[i])
print('================')
print('================')
for i in range(len(parameters)):
print(i, parameters[i])
print('================')
print('================')
for i in range(len(return_tensors)):
if not isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str):
print('=========', i, return_tensors[i].shape)
print('================')
'''
import copy
return_dz = copy.deepcopy(last_connections)
featuremap = return_tensors
featuremap.append(img)
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true)
featuremap.pop(0)
return_dz.append(dLoss_dz)
#####################tensors
'''
for i in range(len(last_connections)):
print(last_connections[i])
for i in range(len(featuremap)):
if not isinstance(featuremap[i], list):
print('=========', i, featuremap[i].shape)
else:
for j in range(len(featuremap[i])):
for k in range(len(featuremap[i][j])):
print(' =========', i, j, k, featuremap[i][j][k].shape)
'''
#####################
# n
for i in range(len(parameters)):
layer = parameters[i]
if not isinstance(layer, list):
print('\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[i]
weight_z = fc_conv_weights[i]
try:
padding = layer['padding']
except:
padding = (0, 0)
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'ReLU':
z = featuremap[i]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[i]
z = featuremap[i]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'View':
last_z = featuremap[i+1]
if 'Pool' in parameters[i+1]['layer_name']:
params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding'])
else:
params = None
dLoss_dz = view_backward(dLoss_dz, last_z, params)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Add':
dLoss_dz = add_backward(dLoss_dz)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Dropout':
if parameters[i-1]['layer_name'] == 'Dropout':
return_dz[i] = dLoss_dz
print('# Skip this layer because the layer has been calculated!')
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\
format(layer['layer_name'])+' Backward End ==========================')
continue
p = layer['p']
mask = featuremap[i]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[i]
gamma = fc_conv_weights[i]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz[i] = dLoss_dz
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================')
elif isinstance(layer, list):
import copy
tmp_dLoss_dz = []
for j in range(len(layer)):
tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz))
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
print('\n=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================')
if tmp_layer['layer_name'] == 'Conv2d':
if k+1 >= len(featuremap[i-1][j]):
z = featuremap[i]
else:
z = featuremap[i-1][j][k+1]
weight_z = fc_conv_weights[i][j][k]
try:
padding = tmp_layer['padding']
except:
padding = (0, 0)
stride = tmp_layer['stride']
tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'ReLU':
z = featuremap[i-1][j][k+1]
tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'BatchNorm2d':
eps = tmp_layer['eps']
z = featuremap[i-1][j][k+1]
gamma = fc_conv_weights[i][j][k]
tmp_dLoss_dz[-1] = batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================')
print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape)
dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1]
else:
print('Not completed in gradient_backward!')
print('# Torch calculated loss: ', loss_torch.detach().numpy())
loss_torch.backward()
if 'VGG' in str(model) or 'AlexNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad))
elif 'ResNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad))
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB | 34.624732 | 172 | 0.63429 |
0ae2d03accd91cc3db5f01917f5d31fdecbb74e5 | 4,372 | py | Python | ark_nlp/factory/utils/attack.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | [
"Apache-2.0"
] | 1 | 2022-03-23T05:10:55.000Z | 2022-03-23T05:10:55.000Z | ark_nlp/factory/utils/attack.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | [
"Apache-2.0"
] | null | null | null | ark_nlp/factory/utils/attack.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | [
"Apache-2.0"
] | null | null | null | import torch
| 31.681159 | 101 | 0.52699 |
0ae341f931ab8799a80b73c9036820e58b4d7de6 | 5,790 | py | Python | core.py | sreejithr/deepfake | c7115ce90ea281e2eb95d75f436efa102c8f2e3c | [
"MIT"
] | null | null | null | core.py | sreejithr/deepfake | c7115ce90ea281e2eb95d75f436efa102c8f2e3c | [
"MIT"
] | 3 | 2021-09-08T02:24:48.000Z | 2022-03-12T00:44:53.000Z | core.py | sreejithr/deepfake | c7115ce90ea281e2eb95d75f436efa102c8f2e3c | [
"MIT"
] | null | null | null | import cv2
import torch
import yaml
import imageio
import throttle
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from skimage.transform import resize
from scipy.spatial import ConvexHull
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from sync_batchnorm import DataParallelWithCallback
#from animate import normalize_kp
# command = [ffmpeg,
# '-y',
# '-f', 'rawvideo',
# '-vcodec','rawvideo',
# '-pix_fmt', 'bgr24',
# '-s', dimension,
# '-i', '-',
# '-c:v', 'libx264',
# '-pix_fmt', 'yuv420p',
# '-preset', 'ultrafast',
# '-f', 'flv',
# 'rtmp://10.10.10.80/live/mystream']
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", required=True, help="path to config")
parser.add_argument("--source_image", required=True, help="path to source image")
parser.add_argument("--checkpoint", default="vox-cpk.pth.tar", help="path to checkpoint")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--cpu", dest="cpu", action="store_true", help="CPU mode")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
source_image = imageio.imread(opt.source_image)
source_image = resize(source_image, (256, 256))[..., :3]
source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not opt.cpu:
source = source.cuda()
kp_source = kp_detector(source)
#out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256))
kp_driving_initial = None
camera = cv2.VideoCapture(0)
ret, frame = camera.read()
while True:
ret, frame = camera.read()
resized = resize(frame, (256, 256))[..., :3]
if not opt.cpu:
resized = resized.cuda()
# y = torch.tensor(np.array(resized))
# x = y.cpu().numpy()
# image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
# # x = y.permute(1, 2, 0)
# plt.imshow(np.array(image))
# plt.show()
driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not kp_driving_initial:
kp_driving_initial = kp_detector(driving_resized)
fake_frame = forward(
source,
driving_resized,
kp_source,
kp_driving_initial,
generator,
kp_detector,
relative=opt.relative,
adapt_scale=opt.adapt_scale,
cpu=opt.cpu
)
cv2.imshow("frame", fake_frame)
#x = np.squeeze(driving_resized, axis=(0,))
#x = driving_resized[0].permute(1, 2, 0)
# plt_driving = driving_resized #permute(2, 3, 1)
#print(plt_driving.shape)
#plt.imshow(x)
#plt.show()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
| 34.260355 | 142 | 0.68342 |
0ae3d125da916faaaf9490284cbbfda3ebc0f150 | 1,735 | py | Python | soupy/approximations/taylor/backup/__init__.py | cpempire/soupy | 9f65e3329fa126619c893daa4cd80478d83f840c | [
"MIT"
] | 1 | 2021-12-07T15:22:23.000Z | 2021-12-07T15:22:23.000Z | soupy/approximations/taylor/backup/__init__.py | cpempire/soupy | 9f65e3329fa126619c893daa4cd80478d83f840c | [
"MIT"
] | null | null | null | soupy/approximations/taylor/backup/__init__.py | cpempire/soupy | 9f65e3329fa126619c893daa4cd80478d83f840c | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
from .controlPDEProblem import ControlPDEProblem
from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE
from .costFunctionalConstant import CostFunctionalConstant
from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE
from .costFunctionalLinear import CostFunctionalLinear
from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE
from .costFunctionalQuadratic import CostFunctionalQuadratic
from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE
# from .chanceConstraintQuadratic import ChanceConstraintQuadratic
# from .chanceConstraintLinear import ChanceConstraintLinear
# from .chanceConstraintConstant import ChanceConstraintConstant
# to do list
# 0. implement zero, Hessian term
# 1. implement linear
# 2. implement quadratic
# 3. implement SAA
# to do list
# 1. SAA does not run well in ccgo1, multiprocessor does not work,
### not clear bug, simplifying adjoint solver works
# 2. quadratic approximation does not converge well, even without variance, does not converge
### record eigenvector after m_tr[i].zero()
# 3. check gradient for quadratic + correction
# what to show tomorrow
# 1. variance reduction by mean square error
# 2. trace estimation by MC and randomized SVD
# 3. scaling with respect to mesh (design + uncertainty), trace, variance reduction, #bfgs
# 4. show the design and state, for both disk and submarine
# 5. random sample and state at different design
# April 9, 2018, work on reporting results
# 1. random samples and states at different design
# 2. table for variance reduction
# 3. plot trace estimation
# 4. plot #bfgs iterations
# obtain all results as planned | 42.317073 | 93 | 0.821326 |
0ae6683abfd956b5c3952439b03a59e007c9300a | 2,402 | py | Python | models/1-Tom/train/kaggle-hubmap-main/src/02_train/transforms.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | [
"MIT"
] | null | null | null | models/1-Tom/train/kaggle-hubmap-main/src/02_train/transforms.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | [
"MIT"
] | null | null | null | models/1-Tom/train/kaggle-hubmap-main/src/02_train/transforms.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | [
"MIT"
] | null | null | null | import numpy as np
from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90,
ShiftScaleRotate, ElasticTransform,
GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop,
RandomBrightnessContrast, HueSaturationValue, IAASharpen,
RandomGamma, RandomBrightness, RandomBrightnessContrast,
GaussianBlur,CLAHE,
Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion,
Normalize, OneOf, NoOp)
from albumentations.pytorch import ToTensorV2 as ToTensor
from get_config import get_config
config = get_config()
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
| 40.711864 | 113 | 0.572856 |
0ae709052ebf9505470ee0404f1013ba86cb8e0e | 13,017 | py | Python | cubspack/geometry.py | Majikat/cubspack | 16aa6df0603d48d757d74837d3457a1934601d89 | [
"Apache-2.0"
] | 11 | 2018-06-18T12:05:34.000Z | 2021-02-24T19:00:24.000Z | cubspack/geometry.py | Majikat/cubspack | 16aa6df0603d48d757d74837d3457a1934601d89 | [
"Apache-2.0"
] | null | null | null | cubspack/geometry.py | Majikat/cubspack | 16aa6df0603d48d757d74837d3457a1934601d89 | [
"Apache-2.0"
] | 2 | 2018-04-08T17:30:00.000Z | 2018-09-27T08:38:42.000Z | # -*- coding: utf-8 -*-
from math import sqrt
def __eq__(self, other):
"""Equal cuboids have same properties."""
if not isinstance(other, self.__class__):
return False
return (self.width == other.width and
self.height == other.height and
self.depth == other.depth and
self.x == other.x and
self.y == other.y and
self.z == other.z)
def __hash__(self):
return hash(
(self.x, self.y, self.z, self.width, self.height, self.depth))
def __iter__(self):
"""Iterate through cuboid corners"""
yield self.corner_top_l
yield self.corner_top_r
yield self.corner_bot_r
yield self.corner_bot_l
yield self.corner_top_l_out
yield self.corner_top_r_out
yield self.corner_bot_r_out
yield self.corner_bot_l_out
def __repr__(self):
return "R({}, {}, {}, {}, {}, {})".format(
self.x, self.y, self.z, self.width, self.height, self.depth)
def volume(self):
"""Cuboid volume"""
return self.width * self.height * self.depth
def move(self, x, y, z):
"""Move Cuboid to x,y,z coordinates
Arguments:
x (int, float): X coordinate
y (int, float): Y coordinate
z (int, float): Z coordinate
"""
self.x = x
self.y = y
self.z = z
def contains(self, cub):
"""Tests if another cuboid is contained by this one
Arguments:
cub (Cuboid): The other cuboiud
Returns:
bool: True if it is inside this one, False otherwise
"""
return (cub.y >= self.y and
cub.x >= self.x and
cub.z >= self.z and
cub.y + cub.height <= self.y + self.height and
cub.x + cub.width <= self.x + self.width and
cub.z + cub.depth <= self.z + self.depth)
def intersects(self, cub, edges=False):
"""Detect intersections between this cuboid and cub.
Args:
cub (Cuboid): Cuboid to test for intersections.
edges (bool): Accept edge touching cuboids as intersects or not
Returns:
bool: True if the cuboids intersect, False otherwise
"""
# Not even touching
if (self.bottom > cub.top or
self.top < cub.bottom or
self.left > cub.right or
self.right < cub.left or
self.outeye > cub.ineye or
self.ineye < cub.outeye):
return False
# Discard edge intersects
if not edges:
if (self.bottom == cub.top or
self.top == cub.bottom or
self.left == cub.right or
self.right == cub.left or
self.outeye == cub.ineye or
self.ineye == cub.outeye):
return False
# Discard corner intersects
if (self.left == cub.right and self.bottom == cub.top and
self.outeye == cub.ineye or
self.left == cub.right and cub.bottom == self.top and
self.outeye == cub.ineye or
self.left == cub.right and self.bottom == cub.top and
cub.outeye == self.ineye or
self.left == cub.right and cub.bottom == self.top and
cub.outeye == self.ineye or
cub.left == self.right and self.bottom == cub.top and
self.outeye == cub.ineye or
cub.left == self.right and cub.bottom == self.top and
self.outeye == cub.ineye or
cub.left == self.right and self.bottom == cub.top and
cub.outeye == self.ineye or
cub.left == self.right and cub.bottom == self.top and
cub.outeye == self.ineye):
return False
return True
def intersection(self, cub, edges=False):
"""Returns the cuboid resulting of the intersection of this and cub
If the cuboids are only touching by their edges, and the
argument 'edges' is True the cuboid returned will have a volume of 0.
Returns None if there is no intersection.
Arguments:
cub (Cuboid): The other cuboid.
edges (bool): If true, touching edges are considered an
intersection, and a cuboid of 0 height or width or depth will be
returned
Returns:
Cuboid: Intersection.
None: There was no intersection.
"""
if not self.intersects(cub, edges=edges):
return None
bottom = max(self.bottom, cub.bottom)
left = max(self.left, cub.left)
top = min(self.top, cub.top)
right = min(self.right, cub.right)
outeye = max(self.outeye, cub.outeye)
ineye = min(self.ineye, cub.ineye)
return Cuboid(
left, bottom, outeye,
right - left, top - bottom, ineye - outeye)
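# Editor's illustrative sketch (not part of the original source). Assuming the constructor
# signature Cuboid(x, y, z, width, height, depth) implied by the intersection code above,
# two overlapping cuboids behave like this:
#   a = Cuboid(0, 0, 0, 4, 4, 4)
#   b = Cuboid(2, 2, 2, 4, 4, 4)
#   a.intersects(b)       # True
#   a.intersection(b)     # R(2, 2, 2, 2, 2, 2) -- the shared 2x2x2 region
#   a.contains(b)         # False -- b extends beyond a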
def join(self, other):
"""Try to join a cuboid to this one.
If the result is also a cuboid and the operation is successful then
this cuboid is modified to the union.
Arguments:
other (Cuboid): Cuboid to join
Returns:
bool: True when successfully joined, False otherwise
"""
if self.contains(other):
return True
if other.contains(self):
self.x = other.x
self.y = other.y
self.z = other.z
self.width = other.width
self.height = other.height
self.depth = other.depth
return True
if not self.intersects(other, edges=True):
return False
# Other cuboid is Up/Down from this
if self.left == other.left and self.width == other.width and \
self.outeye == other.outeye and self.depth == self.depth:
y_min = min(self.bottom, other.bottom)
y_max = max(self.top, other.top)
self.y = y_min
self.height = y_max - y_min
return True
# Other cuboid is Right/Left from this
if self.bottom == other.bottom and self.height == other.height and \
self.outeye == other.outeye and self.depth == self.depth:
x_min = min(self.left, other.left)
x_max = max(self.right, other.right)
self.x = x_min
self.width = x_max - x_min
return True
# Other cuboid is Right/Left from this
if self.bottom == other.bottom and self.height == other.height and \
self.left == other.left and self.width == other.width:
z_min = min(self.outeye, other.outeye)
z_max = max(self.ineye, other.ineye)
self.z = z_min
self.depth = z_max - z_min
return True
return False
| 29.517007 | 85 | 0.556657 |
0ae84e0cfa142229ba7d5efbff2238d28b93f418 | 16,661 | py | Python | app/recipe/tests/test_recipe_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | [
"MIT"
] | null | null | null | app/recipe/tests/test_recipe_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | [
"MIT"
] | null | null | null | app/recipe/tests/test_recipe_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
# use that for making our API requests
from core.models import Recipe, Tag, Ingredient
from ..serializers import RecipeSerializer, RecipeDetailSerializer
import tempfile
# allows you to call a function which will then create a temp file
# somewhere in the system and then you can remove that file after
# you've used it
import os
# this allows us to perform things like
# creating path names and also checking if files exist on the system
from PIL import Image
# pillow, this will import our image class which will let us then
# create test images which we can then upload to our API
RECIPES_URL = reverse('recipe:recipe-list')
# since we're going to need to access the URL in more
# or less all the tests let's assign that as a variable
# at top of the class in all capitals.
# app : identifier of the URL in the app
# /api/recipe/recipes
# /api/recipe/recipes/1/ (id) --> detail url
def image_upload_url(recipe_id):
"""Return URL for recipe image upload"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
# generate our upload image url
# you're going to need the existing recipe ID in order to upload an image
def detail_url(recipe_id):
"""Return recipe detail URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
# name of the end point that the default router will create
# for our viewset because we're going to have a detail action
# this is how you specify arguments with the reverse function
# you just pass in args and then you pass in a list of the
# arguments you want to add
# here we have single item
def sample_tag(user, name='Main course'):
"""Create and return a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Sample recipe',
'time_minutes': 10,
'price': 5.00,
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
# convert the dictionary into the argument
# when you use the two asterisks when calling a
# function it has the reverse effect.
| 40.43932 | 78 | 0.667547 |
0ae880533e14de2255d5554b8a0bb6b7cbc5e1bb | 1,089 | py | Python | Assignment 1 n 2 Day 8.py | paju3125/LetsUpgrade-Python-B7 | c5767361f60f1ec405ab235af85035e2bb9a71e3 | [
"Apache-2.0"
] | null | null | null | Assignment 1 n 2 Day 8.py | paju3125/LetsUpgrade-Python-B7 | c5767361f60f1ec405ab235af85035e2bb9a71e3 | [
"Apache-2.0"
] | null | null | null | Assignment 1 n 2 Day 8.py | paju3125/LetsUpgrade-Python-B7 | c5767361f60f1ec405ab235af85035e2bb9a71e3 | [
"Apache-2.0"
] | null | null | null | # Assignment 1 Day 8
# write a decorator function for taking input for you
# any kind of function you want to build
addition()
subtraction()
multiplication()
division()
# Assignment 2 day 8
# you need to develop a python program to open a file in read only mode and
# try writing something to it and handlethe subsequent errorusing Exception Handling
try:
f=open("abc.txt","r");
f.write("Heyy, i am prajval");
f.close();
except:
print("File is in read only mode...")
| 22.22449 | 85 | 0.651974 |
0ae8c65cafc822a3267fba35c6ed220e7f320711 | 11,646 | py | Python | gwcs/coordinate_frames.py | migueldvb/gwcs | 4eb2abdb1d9d49ee10c1edbcae0d1cec4c758c39 | [
"BSD-3-Clause"
] | null | null | null | gwcs/coordinate_frames.py | migueldvb/gwcs | 4eb2abdb1d9d49ee10c1edbcae0d1cec4c758c39 | [
"BSD-3-Clause"
] | null | null | null | gwcs/coordinate_frames.py | migueldvb/gwcs | 4eb2abdb1d9d49ee10c1edbcae0d1cec4c758c39 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines coordinate frames and ties them to data axes.
"""
from __future__ import absolute_import, division, unicode_literals, print_function
import numpy as np
from astropy import units as u
from astropy import utils as astutil
from astropy import coordinates as coord
from astropy.extern import six
from . import utils as gwutils
__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',
'CoordinateFrame']
STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]
STANDARD_REFERENCE_POSITION = ["GEOCENTER", "BARYCENTER", "HELIOCENTER",
"TOPOCENTER", "LSR", "LSRK", "LSRD",
"GALACTIC_CENTER", "LOCAL_GROUP_CENTER"]
def coordinates(self, *args):
""" Create world coordinates object"""
raise NotImplementedError("Subclasses may implement this")
class CelestialFrame(CoordinateFrame):
"""
Celestial Frame Representation
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
A reference frame.
reference_position : str
Reference position.
unit : str or units.Unit instance or iterable of those
Units on axes.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def coordinates(self, *args):
"""
Create a SkyCoord object.
Parameters
----------
args : float
inputs to wcs.input_frame
"""
# Reorder axes if necessary.
try:
return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame)
except:
raise
class SpectralFrame(CoordinateFrame):
"""
Represents Spectral Frame
Parameters
----------
axes_order : tuple or int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
unit : str or units.Unit instance
Spectral unit.
axes_names : str
Spectral axis name.
name : str
Name for this frame.
"""
class CompositeFrame(CoordinateFrame):
"""
Represents one or more frames.
Parameters
----------
frames : list
List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame).
name : str
Name for this frame.
"""
class Frame2D(CoordinateFrame):
"""
A 2D coordinate frame.
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
| 32.713483 | 99 | 0.574618 |
0aea8c17200ee38f7b989cd3fe4ee1c7be72a125 | 4,286 | py | Python | modox/chan_modifier.py | lukpazera/modox | 4ee5a6033e405f9f7f3a7c80a1cb3c558c90fb01 | [
"MIT"
] | 11 | 2021-02-19T17:11:04.000Z | 2021-12-03T17:14:58.000Z | modox/chan_modifier.py | lukpazera/modox | 4ee5a6033e405f9f7f3a7c80a1cb3c558c90fb01 | [
"MIT"
] | null | null | null | modox/chan_modifier.py | lukpazera/modox | 4ee5a6033e405f9f7f3a7c80a1cb3c558c90fb01 | [
"MIT"
] | null | null | null |
import lx
import modo
import select
import item
from run import run
| 28.573333 | 110 | 0.614326 |
0aeade2b44478bdc750fc6e4297d377345ef5136 | 500 | py | Python | brownie_fund_me/scripts/fund_and_withdraw.py | WangCHEN9/solidity_demos | cf28111a1e972ab9dde70f6d3fac22c897d8b660 | [
"MIT"
] | null | null | null | brownie_fund_me/scripts/fund_and_withdraw.py | WangCHEN9/solidity_demos | cf28111a1e972ab9dde70f6d3fac22c897d8b660 | [
"MIT"
] | null | null | null | brownie_fund_me/scripts/fund_and_withdraw.py | WangCHEN9/solidity_demos | cf28111a1e972ab9dde70f6d3fac22c897d8b660 | [
"MIT"
] | null | null | null | from brownie import FundMe
from scripts.helpful_scripts import get_account
if __name__ == "__main__":
main()
| 18.518519 | 58 | 0.654 |
0aeb5c0e9a64382d41d3447557ec9fb64a32a973 | 409 | py | Python | ex019.py | jefernathan/Python | 2f840a625e8d46d41ab36df07ef50ae15a03c5ab | [
"MIT"
] | null | null | null | ex019.py | jefernathan/Python | 2f840a625e8d46d41ab36df07ef50ae15a03c5ab | [
"MIT"
] | null | null | null | ex019.py | jefernathan/Python | 2f840a625e8d46d41ab36df07ef50ae15a03c5ab | [
"MIT"
] | null | null | null | # Um professor quer sortear um dos seus quatro alunos para apagar o quadro. Faa um programa que ajude ele, lendo o nome dos alunos e escrevendo na tela o nome do escolhido.
from random import choice
nome1 = input('Digite um nome: ')
nome2 = input('Digite outro nome: ')
nome3 = input('Digite mais um nome: ')
nome4 = input('Digite o ltimo nome: ')
nome = [nome1, nome2, nome3, nome4]
print(choice(nome))
| 34.083333 | 173 | 0.728606 |
0aeb7979679122962a3fff866f48391b6b9c9278 | 489 | py | Python | contacts/admin.py | liviamendes/agenda-django-project | d602bb5e762ea477c3c97b5a475ad79036c0c93d | [
"MIT"
] | null | null | null | contacts/admin.py | liviamendes/agenda-django-project | d602bb5e762ea477c3c97b5a475ad79036c0c93d | [
"MIT"
] | null | null | null | contacts/admin.py | liviamendes/agenda-django-project | d602bb5e762ea477c3c97b5a475ad79036c0c93d | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Categoria, Contact
admin.site.register(Categoria)
admin.site.register(Contact, ContactAdmin)
| 30.5625 | 102 | 0.691207 |